l 149 arch/alpha/include/asm/atomic.h ATOMIC_OP(op, op##l) \ l 150 arch/alpha/include/asm/atomic.h ATOMIC_OP_RETURN(op, op##l) \ l 151 arch/alpha/include/asm/atomic.h ATOMIC_FETCH_OP(op, op##l) \ l 197 arch/alpha/include/asm/core_lca.h struct el_lca_mcheck_long * l; l 592 arch/alpha/include/asm/core_t2.h IOPORT(l, 32) l 213 arch/alpha/include/asm/hwrpb.h unsigned long sum = 0, *l; l 214 arch/alpha/include/asm/hwrpb.h for (l = (unsigned long *) h; l < (unsigned long *) &h->chksum; ++l) l 215 arch/alpha/include/asm/hwrpb.h sum += *l; l 325 arch/alpha/include/asm/jensen.h IOPORT(l, 32) l 14 arch/alpha/include/asm/local.h #define local_read(l) atomic_long_read(&(l)->a) l 15 arch/alpha/include/asm/local.h #define local_set(l,i) atomic_long_set(&(l)->a, (i)) l 16 arch/alpha/include/asm/local.h #define local_inc(l) atomic_long_inc(&(l)->a) l 17 arch/alpha/include/asm/local.h #define local_dec(l) atomic_long_dec(&(l)->a) l 18 arch/alpha/include/asm/local.h #define local_add(i,l) atomic_long_add((i),(&(l)->a)) l 19 arch/alpha/include/asm/local.h #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) l 21 arch/alpha/include/asm/local.h static __inline__ long local_add_return(long i, local_t * l) l 33 arch/alpha/include/asm/local.h :"=&r" (temp), "=m" (l->a.counter), "=&r" (result) l 34 arch/alpha/include/asm/local.h :"Ir" (i), "m" (l->a.counter) : "memory"); l 38 arch/alpha/include/asm/local.h static __inline__ long local_sub_return(long i, local_t * l) l 50 arch/alpha/include/asm/local.h :"=&r" (temp), "=m" (l->a.counter), "=&r" (result) l 51 arch/alpha/include/asm/local.h :"Ir" (i), "m" (l->a.counter) : "memory"); l 55 arch/alpha/include/asm/local.h #define local_cmpxchg(l, o, n) \ l 56 arch/alpha/include/asm/local.h (cmpxchg_local(&((l)->a.counter), (o), (n))) l 57 arch/alpha/include/asm/local.h #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) l 68 arch/alpha/include/asm/local.h #define local_add_unless(l, a, u) \ l 71 arch/alpha/include/asm/local.h c = local_read(l); \ l 75 arch/alpha/include/asm/local.h old = local_cmpxchg((l), c, c + (a)); \ l 82 arch/alpha/include/asm/local.h #define local_inc_not_zero(l) local_add_unless((l), 1, 0) l 84 arch/alpha/include/asm/local.h #define local_add_negative(a, l) (local_add_return((a), (l)) < 0) l 86 arch/alpha/include/asm/local.h #define local_dec_return(l) local_sub_return(1,(l)) l 88 arch/alpha/include/asm/local.h #define local_inc_return(l) local_add_return(1,(l)) l 90 arch/alpha/include/asm/local.h #define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0) l 92 arch/alpha/include/asm/local.h #define local_inc_and_test(l) (local_add_return(1, (l)) == 0) l 94 arch/alpha/include/asm/local.h #define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) l 97 arch/alpha/include/asm/local.h #define __local_inc(l) ((l)->a.counter++) l 98 arch/alpha/include/asm/local.h #define __local_dec(l) ((l)->a.counter++) l 99 arch/alpha/include/asm/local.h #define __local_add(i,l) ((l)->a.counter+=(i)) l 100 arch/alpha/include/asm/local.h #define __local_sub(i,l) ((l)->a.counter-=(i)) l 448 arch/alpha/kernel/core_lca.c el.l->pt[0], el.l->exc_addr, el.l->dc_stat); l 449 arch/alpha/kernel/core_lca.c printk(KERN_CRIT " car: %#lx\n", el.l->car); l 450 arch/alpha/kernel/core_lca.c if (el.l->esr & ESR_EAV) { l 451 arch/alpha/kernel/core_lca.c mem_error(el.l->esr, el.l->ear); l 453 arch/alpha/kernel/core_lca.c if (el.l->ioc_stat0 & IOC_ERR) { l 454 arch/alpha/kernel/core_lca.c ioc_error(el.l->ioc_stat0, el.l->ioc_stat1); l 129 arch/arc/include/asm/io.h 
__raw_readsx(32, l) l 190 arch/arc/include/asm/io.h __raw_writesx(32, l) l 207 arch/arc/include/asm/io.h #define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); }) l 208 arch/arc/include/asm/io.h #define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); }) l 209 arch/arc/include/asm/io.h #define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); }) l 214 arch/arc/include/asm/io.h #define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); }) l 215 arch/arc/include/asm/io.h #define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); }) l 216 arch/arc/include/asm/io.h #define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); }) l 22 arch/arc/include/asm/processor.h unsigned int l, h; l 32 arch/arc/kernel/fpu.c unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l; l 33 arch/arc/kernel/fpu.c unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l; l 112 arch/arm/crypto/crc32-ce-glue.c unsigned int l; l 116 arch/arm/crypto/crc32-ce-glue.c l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F)); l 118 arch/arm/crypto/crc32-ce-glue.c *crc = fallback_crc32(*crc, data, l); l 120 arch/arm/crypto/crc32-ce-glue.c data += l; l 121 arch/arm/crypto/crc32-ce-glue.c length -= l; l 125 arch/arm/crypto/crc32-ce-glue.c l = round_down(length, SCALE_F); l 128 arch/arm/crypto/crc32-ce-glue.c *crc = crc32_pmull_le(data, l, *crc); l 131 arch/arm/crypto/crc32-ce-glue.c data += l; l 132 arch/arm/crypto/crc32-ce-glue.c length -= l; l 146 arch/arm/crypto/crc32-ce-glue.c unsigned int l; l 150 arch/arm/crypto/crc32-ce-glue.c l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F)); l 152 arch/arm/crypto/crc32-ce-glue.c *crc = fallback_crc32c(*crc, data, l); l 154 arch/arm/crypto/crc32-ce-glue.c data += l; l 155 arch/arm/crypto/crc32-ce-glue.c length -= l; l 159 arch/arm/crypto/crc32-ce-glue.c l = round_down(length, SCALE_F); l 162 arch/arm/crypto/crc32-ce-glue.c *crc = crc32c_pmull_le(data, l, *crc); l 165 arch/arm/crypto/crc32-ce-glue.c data += l; l 166 arch/arm/crypto/crc32-ce-glue.c length -= l; l 277 arch/arm/include/asm/arch_gicv3.h #define gic_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) l 243 arch/arm/include/asm/assembler.h #define USERL(l, x...) 
\ l 247 arch/arm/include/asm/assembler.h .long 9999b,l; \ l 9 arch/arm/include/asm/dmi.h #define dmi_early_remap(x, l) memremap(x, l, MEMREMAP_WB) l 10 arch/arm/include/asm/dmi.h #define dmi_early_unmap(x, l) memunmap(x) l 11 arch/arm/include/asm/dmi.h #define dmi_remap(x, l) memremap(x, l, MEMREMAP_WB) l 13 arch/arm/include/asm/dmi.h #define dmi_alloc(l) kzalloc(l, GFP_KERNEL) l 144 arch/arm/include/asm/glue-cache.h static inline void nop_dma_map_area(const void *s, size_t l, int f) { } l 145 arch/arm/include/asm/glue-cache.h static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } l 264 arch/arm/include/asm/io.h #define outsb(p,d,l) __raw_writesb(__io(p),d,l) l 265 arch/arm/include/asm/io.h #define outsw(p,d,l) __raw_writesw(__io(p),d,l) l 266 arch/arm/include/asm/io.h #define outsl(p,d,l) __raw_writesl(__io(p),d,l) l 268 arch/arm/include/asm/io.h #define insb(p,d,l) __raw_readsb(__io(p),d,l) l 269 arch/arm/include/asm/io.h #define insw(p,d,l) __raw_readsw(__io(p),d,l) l 270 arch/arm/include/asm/io.h #define insl(p,d,l) __raw_readsl(__io(p),d,l) l 309 arch/arm/include/asm/io.h #define readsb(p,d,l) __raw_readsb(p,d,l) l 310 arch/arm/include/asm/io.h #define readsw(p,d,l) __raw_readsw(p,d,l) l 311 arch/arm/include/asm/io.h #define readsl(p,d,l) __raw_readsl(p,d,l) l 313 arch/arm/include/asm/io.h #define writesb(p,d,l) __raw_writesb(p,d,l) l 314 arch/arm/include/asm/io.h #define writesw(p,d,l) __raw_writesw(p,d,l) l 315 arch/arm/include/asm/io.h #define writesl(p,d,l) __raw_writesl(p,d,l) l 343 arch/arm/include/asm/io.h #define memset_io(c,v,l) _memset_io(c,(v),(l)) l 344 arch/arm/include/asm/io.h #define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l)) l 345 arch/arm/include/asm/io.h #define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l)) l 204 arch/arm/include/asm/kvm_mmu.h #define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) l 19 arch/arm/kernel/early_printk.c unsigned l = min(n, sizeof(buf)-1); l 20 arch/arm/kernel/early_printk.c memcpy(buf, s, l); l 21 arch/arm/kernel/early_printk.c buf[l] = 0; l 22 arch/arm/kernel/early_printk.c s += l; l 23 arch/arm/kernel/early_printk.c n -= l; l 160 arch/arm/kernel/setup.c static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; l 161 arch/arm/kernel/setup.c #define ENDIANNESS ((char)endian_test.l) l 37 arch/arm/mach-imx/iomux-imx31.c u32 l; l 47 arch/arm/mach-imx/iomux-imx31.c l = imx_readl(reg); l 48 arch/arm/mach-imx/iomux-imx31.c l &= ~(0xff << (field * 8)); l 49 arch/arm/mach-imx/iomux-imx31.c l |= mode << (field * 8); l 50 arch/arm/mach-imx/iomux-imx31.c imx_writel(l, reg); l 60 arch/arm/mach-imx/iomux-imx31.c u32 field, l; l 72 arch/arm/mach-imx/iomux-imx31.c l = imx_readl(reg); l 73 arch/arm/mach-imx/iomux-imx31.c l &= ~(0x1ff << (field * 10)); l 74 arch/arm/mach-imx/iomux-imx31.c l |= config << (field * 10); l 75 arch/arm/mach-imx/iomux-imx31.c imx_writel(l, reg); l 150 arch/arm/mach-imx/iomux-imx31.c u32 l; l 153 arch/arm/mach-imx/iomux-imx31.c l = imx_readl(IOMUXGPR); l 155 arch/arm/mach-imx/iomux-imx31.c l |= gp; l 157 arch/arm/mach-imx/iomux-imx31.c l &= ~gp; l 159 arch/arm/mach-imx/iomux-imx31.c imx_writel(l, IOMUXGPR); l 62 arch/arm/mach-ixp4xx/include/mach/io.h #define writesb(p, v, l) __indirect_writesb(p, v, l) l 63 arch/arm/mach-ixp4xx/include/mach/io.h #define writesw(p, v, l) __indirect_writesw(p, v, l) l 64 arch/arm/mach-ixp4xx/include/mach/io.h #define writesl(p, v, l) __indirect_writesl(p, v, l) l 74 arch/arm/mach-ixp4xx/include/mach/io.h #define 
readsb(p, v, l) __indirect_readsb(p, v, l) l 75 arch/arm/mach-ixp4xx/include/mach/io.h #define readsw(p, v, l) __indirect_readsw(p, v, l) l 76 arch/arm/mach-ixp4xx/include/mach/io.h #define readsl(p, v, l) __indirect_readsl(p, v, l) l 225 arch/arm/mach-ixp4xx/include/mach/io.h #define memset_io(c,v,l) _memset_io((c),(v),(l)) l 226 arch/arm/mach-ixp4xx/include/mach/io.h #define memcpy_fromio(a,c,l) _memcpy_fromio((a),(c),(l)) l 227 arch/arm/mach-ixp4xx/include/mach/io.h #define memcpy_toio(c,a,l) _memcpy_toio((c),(a),(l)) l 74 arch/arm/mach-mvebu/board-v7.c int l; l 79 arch/arm/mach-mvebu/board-v7.c reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); l 81 arch/arm/mach-mvebu/board-v7.c reg = of_get_flat_dt_prop(node, "reg", &l); l 85 arch/arm/mach-mvebu/board-v7.c endp = reg + (l / sizeof(__be32)); l 257 arch/arm/mach-omap1/board-osk.c u32 l; l 265 arch/arm/mach-omap1/board-osk.c l = omap_readl(EMIFS_CCS(1)); l 266 arch/arm/mach-omap1/board-osk.c l |= 0x3; l 267 arch/arm/mach-omap1/board-osk.c omap_writel(l, EMIFS_CCS(1)); l 558 arch/arm/mach-omap1/board-osk.c u32 l; l 568 arch/arm/mach-omap1/board-osk.c l = omap_readl(EMIFS_CCS(3)); l 569 arch/arm/mach-omap1/board-osk.c if (l != EMIFS_CS3_VAL) l 580 arch/arm/mach-omap1/board-osk.c l = omap_readl(USB_TRANSCEIVER_CTRL); l 581 arch/arm/mach-omap1/board-osk.c l |= (3 << 1); l 582 arch/arm/mach-omap1/board-osk.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 365 arch/arm/mach-omap1/clock.c u32 l; l 376 arch/arm/mach-omap1/clock.c l = omap_readl(MOD_CONF_CTRL_1); l 377 arch/arm/mach-omap1/clock.c l &= ~(7 << 17); l 378 arch/arm/mach-omap1/clock.c l |= div << 17; l 379 arch/arm/mach-omap1/clock.c omap_writel(l, MOD_CONF_CTRL_1); l 212 arch/arm/mach-omap1/dma.c u32 l; l 214 arch/arm/mach-omap1/dma.c l = dma_read(CCR, lch); l 215 arch/arm/mach-omap1/dma.c l &= ~OMAP_DMA_CCR_EN; l 216 arch/arm/mach-omap1/dma.c dma_write(l, CCR, lch); l 219 arch/arm/mach-omap1/dma.c l = dma_read(CSR, lch); l 17 arch/arm/mach-omap1/flash.c u32 l; l 19 arch/arm/mach-omap1/flash.c l = omap_readl(EMIFS_CONFIG); l 21 arch/arm/mach-omap1/flash.c l |= OMAP_EMIFS_CONFIG_WP; l 23 arch/arm/mach-omap1/flash.c l &= ~OMAP_EMIFS_CONFIG_WP; l 24 arch/arm/mach-omap1/flash.c omap_writel(l, EMIFS_CONFIG); l 49 arch/arm/mach-omap1/timer.c u32 l; l 51 arch/arm/mach-omap1/timer.c l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n); l 52 arch/arm/mach-omap1/timer.c l |= source << n; l 53 arch/arm/mach-omap1/timer.c omap_writel(l, MOD_CONF_CTRL_1); l 294 arch/arm/mach-omap1/usb.c u32 l; l 297 arch/arm/mach-omap1/usb.c l = omap_readl(USB_TRANSCEIVER_CTRL); l 298 arch/arm/mach-omap1/usb.c l &= ~(3 << 1); l 299 arch/arm/mach-omap1/usb.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 316 arch/arm/mach-omap1/usb.c u32 l; l 334 arch/arm/mach-omap1/usb.c l = omap_readl(USB_TRANSCEIVER_CTRL); l 335 arch/arm/mach-omap1/usb.c l &= ~(7 << 4); l 337 arch/arm/mach-omap1/usb.c l |= (3 << 1); l 338 arch/arm/mach-omap1/usb.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 362 arch/arm/mach-omap1/usb.c u32 l; l 364 arch/arm/mach-omap1/usb.c l = omap_readl(USB_TRANSCEIVER_CTRL); l 365 arch/arm/mach-omap1/usb.c l &= ~CONF_USB2_UNI_R; l 366 arch/arm/mach-omap1/usb.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 379 arch/arm/mach-omap1/usb.c u32 l; l 383 arch/arm/mach-omap1/usb.c l = omap_readl(USB_TRANSCEIVER_CTRL); l 384 arch/arm/mach-omap1/usb.c l |= CONF_USB2_UNI_R; l 385 arch/arm/mach-omap1/usb.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 401 arch/arm/mach-omap1/usb.c u32 l; l 403 arch/arm/mach-omap1/usb.c l = 
omap_readl(USB_TRANSCEIVER_CTRL); l 404 arch/arm/mach-omap1/usb.c l &= ~CONF_USB1_UNI_R; l 405 arch/arm/mach-omap1/usb.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 446 arch/arm/mach-omap1/usb.c u32 l; l 448 arch/arm/mach-omap1/usb.c l = omap_readl(USB_TRANSCEIVER_CTRL); l 449 arch/arm/mach-omap1/usb.c l |= CONF_USB1_UNI_R; l 450 arch/arm/mach-omap1/usb.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 471 arch/arm/mach-omap1/usb.c u32 l; l 473 arch/arm/mach-omap1/usb.c l = omap_readl(USB_TRANSCEIVER_CTRL); l 474 arch/arm/mach-omap1/usb.c l &= ~CONF_USB2_UNI_R; l 475 arch/arm/mach-omap1/usb.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 517 arch/arm/mach-omap1/usb.c u32 l; l 521 arch/arm/mach-omap1/usb.c l = omap_readl(USB_TRANSCEIVER_CTRL); l 522 arch/arm/mach-omap1/usb.c l |= CONF_USB2_UNI_R; l 523 arch/arm/mach-omap1/usb.c omap_writel(l, USB_TRANSCEIVER_CTRL); l 336 arch/arm/mach-omap2/cm2xxx.c u32 l; l 339 arch/arm/mach-omap2/cm2xxx.c l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1); l 340 arch/arm/mach-omap2/cm2xxx.c if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK | l 345 arch/arm/mach-omap2/cm2xxx.c l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2); l 346 arch/arm/mach-omap2/cm2xxx.c if (l & OMAP24XX_EN_UART3_MASK) l 216 arch/arm/mach-omap2/control.c u32 l; l 218 arch/arm/mach-omap2/control.c l = ('B' << 24) | ('M' << 16) | bootmode; l 229 arch/arm/mach-omap2/control.c writel_relaxed(l, OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD + 4)); l 64 arch/arm/mach-omap2/pm24xx.c u32 l; l 83 arch/arm/mach-omap2/pm24xx.c l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL; l 84 arch/arm/mach-omap2/pm24xx.c omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0); l 250 arch/arm/mach-omap2/pm24xx.c u32 l; l 253 arch/arm/mach-omap2/pm24xx.c l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET); l 254 arch/arm/mach-omap2/pm24xx.c printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f); l 130 arch/arm/mach-omap2/sdrc.c u32 l; l 132 arch/arm/mach-omap2/sdrc.c l = sms_read_reg(SMS_SYSCONFIG); l 133 arch/arm/mach-omap2/sdrc.c l &= ~(0x3 << 3); l 134 arch/arm/mach-omap2/sdrc.c l |= (0x2 << 3); l 135 arch/arm/mach-omap2/sdrc.c sms_write_reg(l, SMS_SYSCONFIG); l 137 arch/arm/mach-omap2/sdrc.c l = sdrc_read_reg(SDRC_SYSCONFIG); l 138 arch/arm/mach-omap2/sdrc.c l &= ~(0x3 << 3); l 139 arch/arm/mach-omap2/sdrc.c l |= (0x2 << 3); l 140 arch/arm/mach-omap2/sdrc.c sdrc_write_reg(l, SDRC_SYSCONFIG); l 150 arch/arm/mach-omap2/sdrc.c l = (1 << SDRC_POWER_EXTCLKDIS_SHIFT) | l 152 arch/arm/mach-omap2/sdrc.c sdrc_write_reg(l, SDRC_POWER); l 77 arch/arm/mach-s3c24xx/include/mach/io.h DECLARE_IO(int,l,"") l 204 arch/arm/mach-s3c24xx/include/mach/io.h #define insb(p,d,l) __raw_readsb(__ioaddr(p),d,l) l 205 arch/arm/mach-s3c24xx/include/mach/io.h #define insw(p,d,l) __raw_readsw(__ioaddr(p),d,l) l 206 arch/arm/mach-s3c24xx/include/mach/io.h #define insl(p,d,l) __raw_readsl(__ioaddr(p),d,l) l 208 arch/arm/mach-s3c24xx/include/mach/io.h #define outsb(p,d,l) __raw_writesb(__ioaddr(p),d,l) l 209 arch/arm/mach-s3c24xx/include/mach/io.h #define outsw(p,d,l) __raw_writesw(__ioaddr(p),d,l) l 210 arch/arm/mach-s3c24xx/include/mach/io.h #define outsl(p,d,l) __raw_writesl(__ioaddr(p),d,l) l 136 arch/arm/mach-shmobile/setup-rcar-gen2.c int l; l 144 arch/arm/mach-shmobile/setup-rcar-gen2.c reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); l 146 arch/arm/mach-shmobile/setup-rcar-gen2.c reg = of_get_flat_dt_prop(node, "reg", &l); l 150 arch/arm/mach-shmobile/setup-rcar-gen2.c endp = reg + (l / 
sizeof(__be32)); l 158 arch/arm/plat-omap/dma.c u32 l; l 160 arch/arm/plat-omap/dma.c l = omap_readl(reg); l 161 arch/arm/plat-omap/dma.c l &= ~(0x3f << shift); l 162 arch/arm/plat-omap/dma.c l |= (dev - 1) << shift; l 163 arch/arm/plat-omap/dma.c omap_writel(l, reg); l 175 arch/arm/plat-omap/dma.c u32 l; l 195 arch/arm/plat-omap/dma.c l = omap_readl(reg); l 196 arch/arm/plat-omap/dma.c l &= ~(0xf << 8); l 197 arch/arm/plat-omap/dma.c l |= (priority & 0xf) << 8; l 198 arch/arm/plat-omap/dma.c omap_writel(l, reg); l 222 arch/arm/plat-omap/dma.c u32 l; l 224 arch/arm/plat-omap/dma.c l = p->dma_read(CSDP, lch); l 225 arch/arm/plat-omap/dma.c l &= ~0x03; l 226 arch/arm/plat-omap/dma.c l |= data_type; l 227 arch/arm/plat-omap/dma.c p->dma_write(l, CSDP, lch); l 297 arch/arm/plat-omap/dma.c u32 l; l 299 arch/arm/plat-omap/dma.c l = p->dma_read(LCH_CTRL, lch); l 300 arch/arm/plat-omap/dma.c l &= ~0x7; l 301 arch/arm/plat-omap/dma.c l |= mode; l 302 arch/arm/plat-omap/dma.c p->dma_write(l, LCH_CTRL, lch); l 312 arch/arm/plat-omap/dma.c u32 l; l 323 arch/arm/plat-omap/dma.c l = p->dma_read(CCR, lch); l 324 arch/arm/plat-omap/dma.c l &= ~(0x03 << 12); l 325 arch/arm/plat-omap/dma.c l |= src_amode << 12; l 326 arch/arm/plat-omap/dma.c p->dma_write(l, CCR, lch); l 356 arch/arm/plat-omap/dma.c u32 l; l 358 arch/arm/plat-omap/dma.c l = p->dma_read(CSDP, lch); l 359 arch/arm/plat-omap/dma.c l &= ~(1 << 6); l 361 arch/arm/plat-omap/dma.c l |= (1 << 6); l 362 arch/arm/plat-omap/dma.c p->dma_write(l, CSDP, lch); l 369 arch/arm/plat-omap/dma.c u32 l; l 371 arch/arm/plat-omap/dma.c l = p->dma_read(CSDP, lch); l 372 arch/arm/plat-omap/dma.c l &= ~(0x03 << 7); l 404 arch/arm/plat-omap/dma.c l |= (burst << 7); l 405 arch/arm/plat-omap/dma.c p->dma_write(l, CSDP, lch); l 414 arch/arm/plat-omap/dma.c u32 l; l 417 arch/arm/plat-omap/dma.c l = p->dma_read(CSDP, lch); l 418 arch/arm/plat-omap/dma.c l &= ~(0x1f << 9); l 419 arch/arm/plat-omap/dma.c l |= dest_port << 9; l 420 arch/arm/plat-omap/dma.c p->dma_write(l, CSDP, lch); l 423 arch/arm/plat-omap/dma.c l = p->dma_read(CCR, lch); l 424 arch/arm/plat-omap/dma.c l &= ~(0x03 << 14); l 425 arch/arm/plat-omap/dma.c l |= dest_amode << 14; l 426 arch/arm/plat-omap/dma.c p->dma_write(l, CCR, lch); l 437 arch/arm/plat-omap/dma.c u32 l; l 439 arch/arm/plat-omap/dma.c l = p->dma_read(CSDP, lch); l 440 arch/arm/plat-omap/dma.c l &= ~(1 << 13); l 442 arch/arm/plat-omap/dma.c l |= 1 << 13; l 443 arch/arm/plat-omap/dma.c p->dma_write(l, CSDP, lch); l 450 arch/arm/plat-omap/dma.c u32 l; l 452 arch/arm/plat-omap/dma.c l = p->dma_read(CSDP, lch); l 453 arch/arm/plat-omap/dma.c l &= ~(0x03 << 14); l 482 arch/arm/plat-omap/dma.c l |= (burst << 14); l 483 arch/arm/plat-omap/dma.c p->dma_write(l, CSDP, lch); l 524 arch/arm/plat-omap/dma.c u32 l; l 526 arch/arm/plat-omap/dma.c l = p->dma_read(CLNK_CTRL, lch); l 529 arch/arm/plat-omap/dma.c l &= ~(1 << 14); l 533 arch/arm/plat-omap/dma.c l = dma_chan[lch].next_lch | (1 << 15); l 538 arch/arm/plat-omap/dma.c l = dma_chan[lch].next_linked_ch | (1 << 15); l 541 arch/arm/plat-omap/dma.c p->dma_write(l, CLNK_CTRL, lch); l 546 arch/arm/plat-omap/dma.c u32 l; l 548 arch/arm/plat-omap/dma.c l = p->dma_read(CLNK_CTRL, lch); l 555 arch/arm/plat-omap/dma.c l |= 1 << 14; l 560 arch/arm/plat-omap/dma.c l &= ~(1 << 15); l 563 arch/arm/plat-omap/dma.c p->dma_write(l, CLNK_CTRL, lch); l 760 arch/arm/plat-omap/dma.c u32 l; l 766 arch/arm/plat-omap/dma.c l = p->dma_read(CCR, lch); l 767 arch/arm/plat-omap/dma.c l &= ~((1 << 6) | (1 << 26)); l 769 
arch/arm/plat-omap/dma.c l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26); l 771 arch/arm/plat-omap/dma.c l |= ((read_prio & 0x1) << 6); l 773 arch/arm/plat-omap/dma.c p->dma_write(l, CCR, lch); l 794 arch/arm/plat-omap/dma.c u32 l; l 835 arch/arm/plat-omap/dma.c l = p->dma_read(CCR, lch); l 838 arch/arm/plat-omap/dma.c l |= OMAP_DMA_CCR_BUFFERING_DISABLE; l 839 arch/arm/plat-omap/dma.c l |= OMAP_DMA_CCR_EN; l 848 arch/arm/plat-omap/dma.c p->dma_write(l, CCR, lch); l 856 arch/arm/plat-omap/dma.c u32 l; l 861 arch/arm/plat-omap/dma.c l = p->dma_read(CCR, lch); l 863 arch/arm/plat-omap/dma.c (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) { l 868 arch/arm/plat-omap/dma.c l = p->dma_read(OCP_SYSCONFIG, lch); l 869 arch/arm/plat-omap/dma.c sys_cf = l; l 870 arch/arm/plat-omap/dma.c l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK; l 871 arch/arm/plat-omap/dma.c l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); l 872 arch/arm/plat-omap/dma.c p->dma_write(l , OCP_SYSCONFIG, 0); l 874 arch/arm/plat-omap/dma.c l = p->dma_read(CCR, lch); l 875 arch/arm/plat-omap/dma.c l &= ~OMAP_DMA_CCR_EN; l 876 arch/arm/plat-omap/dma.c p->dma_write(l, CCR, lch); l 879 arch/arm/plat-omap/dma.c l = p->dma_read(CCR, lch); l 880 arch/arm/plat-omap/dma.c while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE | l 884 arch/arm/plat-omap/dma.c l = p->dma_read(CCR, lch); l 891 arch/arm/plat-omap/dma.c l &= ~OMAP_DMA_CCR_EN; l 892 arch/arm/plat-omap/dma.c p->dma_write(l, CCR, lch); l 71 arch/arm64/crypto/aes-ce-ccm-glue.c u32 l = req->iv[0] + 1; l 74 arch/arm64/crypto/aes-ce-ccm-glue.c if (l < 2 || l > 8) l 78 arch/arm64/crypto/aes-ce-ccm-glue.c if (l < 4 && msglen >> (8 * l)) l 88 arch/arm64/crypto/aes-ce-ccm-glue.c memcpy(maciv, req->iv, AES_BLOCK_SIZE - l); l 101 arch/arm64/crypto/aes-ce-ccm-glue.c memset(&req->iv[AES_BLOCK_SIZE - l], 0, l); l 144 arch/arm64/crypto/aes-ce-ccm-glue.c struct __packed { __be16 l; __be32 h; u16 len; } ltag; l 151 arch/arm64/crypto/aes-ce-ccm-glue.c ltag.l = cpu_to_be16(len); l 154 arch/arm64/crypto/aes-ce-ccm-glue.c ltag.l = cpu_to_be16(0xfffe); l 30 arch/arm64/crypto/aes-ce-glue.c asmlinkage u32 __aes_ce_sub(u32 l); l 902 arch/arm64/crypto/aes-glue.c unsigned int l; l 923 arch/arm64/crypto/aes-glue.c l = min(len, AES_BLOCK_SIZE - ctx->len); l 925 arch/arm64/crypto/aes-glue.c if (l <= AES_BLOCK_SIZE) { l 926 arch/arm64/crypto/aes-glue.c crypto_xor(ctx->dg + ctx->len, p, l); l 927 arch/arm64/crypto/aes-glue.c ctx->len += l; l 928 arch/arm64/crypto/aes-glue.c len -= l; l 929 arch/arm64/crypto/aes-glue.c p += l; l 43 arch/arm64/crypto/chacha-neon-glue.c int l = min(bytes, CHACHA_BLOCK_SIZE * 5); l 45 arch/arm64/crypto/chacha-neon-glue.c if (l <= CHACHA_BLOCK_SIZE) { l 48 arch/arm64/crypto/chacha-neon-glue.c memcpy(buf, src, l); l 50 arch/arm64/crypto/chacha-neon-glue.c memcpy(dst, buf, l); l 54 arch/arm64/crypto/chacha-neon-glue.c chacha_4block_xor_neon(state, dst, src, nrounds, l); l 233 arch/arm64/include/asm/alternative.h .macro uao_ldp l, reg1, reg2, addr, post_inc l 248 arch/arm64/include/asm/alternative.h .macro uao_stp l, reg1, reg2, addr, post_inc l 263 arch/arm64/include/asm/alternative.h .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc l 275 arch/arm64/include/asm/alternative.h .macro uao_ldp l, reg1, reg2, addr, post_inc l 278 arch/arm64/include/asm/alternative.h .macro uao_stp l, reg1, reg2, addr, post_inc l 281 arch/arm64/include/asm/alternative.h .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc l 127 arch/arm64/include/asm/arch_gicv3.h #define 
gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) l 145 arch/arm64/include/asm/assembler.h #define USER(l, x...) \ l 147 arch/arm64/include/asm/assembler.h _asm_extable 9999b, l l 102 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)\ l 105 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)\ l 106 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\ l 109 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) l 117 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\ l 120 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) l 201 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) \ l 204 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) \ l 205 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \ l 208 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) l 216 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \ l 219 arch/arm64/include/asm/atomic_ll_sc.h ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) l 307 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K) l 308 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K) l 309 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K) l 310 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L) l 311 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K) l 312 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K) l 313 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K) l 314 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) l 348 arch/arm64/include/asm/atomic_ll_sc.h __CMPXCHG_DBL(_mb, dmb ish, l, "memory") l 46 arch/arm64/include/asm/atomic_lse.h ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \ l 75 arch/arm64/include/asm/atomic_lse.h ATOMIC_OP_ADD_RETURN(_release, l, "memory") l 106 arch/arm64/include/asm/atomic_lse.h ATOMIC_FETCH_OP_AND(_release, l, "memory") l 140 arch/arm64/include/asm/atomic_lse.h ATOMIC_OP_SUB_RETURN(_release, l, "memory") l 161 arch/arm64/include/asm/atomic_lse.h ATOMIC_FETCH_OP_SUB(_release, l, "memory") l 199 arch/arm64/include/asm/atomic_lse.h ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \ l 228 arch/arm64/include/asm/atomic_lse.h ATOMIC64_OP_ADD_RETURN(_release, l, "memory") l 259 arch/arm64/include/asm/atomic_lse.h ATOMIC64_FETCH_OP_AND(_release, l, "memory") l 293 arch/arm64/include/asm/atomic_lse.h ATOMIC64_OP_SUB_RETURN(_release, l, "memory") l 314 arch/arm64/include/asm/atomic_lse.h ATOMIC64_FETCH_OP_SUB(_release, l, "memory") l 372 arch/arm64/include/asm/atomic_lse.h __CMPXCHG_CASE(w, b, rel_, 8, l, "memory") l 373 arch/arm64/include/asm/atomic_lse.h __CMPXCHG_CASE(w, h, rel_, 16, l, "memory") l 374 arch/arm64/include/asm/atomic_lse.h __CMPXCHG_CASE(w, , rel_, 32, l, "memory") l 375 arch/arm64/include/asm/atomic_lse.h __CMPXCHG_CASE(x, , rel_, 64, l, "memory") l 53 arch/arm64/include/asm/cmpxchg.h 
__XCHG_CASE(w, b, rel_, 8, , , , , l, "memory") l 54 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, h, rel_, 16, , , , , l, "memory") l 55 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, , rel_, 32, , , , , l, "memory") l 56 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE( , , rel_, 64, , , , , l, "memory") l 57 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, b, mb_, 8, dmb ish, nop, , a, l, "memory") l 58 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory") l 59 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE(w, , mb_, 32, dmb ish, nop, , a, l, "memory") l 60 arch/arm64/include/asm/cmpxchg.h __XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory") l 25 arch/arm64/include/asm/dmi.h #define dmi_early_remap(x, l) ioremap_cache(x, l) l 26 arch/arm64/include/asm/dmi.h #define dmi_early_unmap(x, l) iounmap(x) l 27 arch/arm64/include/asm/dmi.h #define dmi_remap(x, l) ioremap_cache(x, l) l 29 arch/arm64/include/asm/dmi.h #define dmi_alloc(l) kzalloc(l, GFP_KERNEL) l 158 arch/arm64/include/asm/io.h #define memset_io(c,v,l) __memset_io((c),(v),(l)) l 159 arch/arm64/include/asm/io.h #define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l)) l 160 arch/arm64/include/asm/io.h #define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l)) l 303 arch/arm64/include/asm/kvm_mmu.h #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) l 20 arch/c6x/include/asm/timex.h unsigned l, h; l 26 arch/c6x/include/asm/timex.h : "=b"(l), "=b"(h)); l 27 arch/c6x/include/asm/timex.h return ((cycles_t)h << 32) | l; l 44 arch/csky/include/asm/page.h extern void *memset(void *dest, int c, size_t l); l 45 arch/csky/include/asm/page.h extern void *memcpy(void *to, const void *from, size_t l); l 34 arch/h8300/lib/libgcc.h #define PUSHP push.l l 35 arch/h8300/lib/libgcc.h #define POPP pop.l l 39 arch/hexagon/include/asm/io.h #define readsw(p, d, l) __raw_readsw(p, d, l) l 40 arch/hexagon/include/asm/io.h #define writesw(p, d, l) __raw_writesw(p, d, l) l 42 arch/hexagon/include/asm/io.h #define readsl(p, d, l) __raw_readsl(p, d, l) l 43 arch/hexagon/include/asm/io.h #define writesl(p, d, l) __raw_writesl(p, d, l) l 10 arch/ia64/include/asm/dmi.h #define dmi_early_unmap(x, l) iounmap(x) l 13 arch/ia64/include/asm/dmi.h #define dmi_alloc(l) kzalloc(l, GFP_ATOMIC) l 45 arch/ia64/include/asm/kprobes.h unsigned long long l; l 106 arch/ia64/include/asm/mca_asm.h mov psr.l = temp2; \ l 170 arch/ia64/include/asm/mca_asm.h mov psr.l = temp2; \ l 134 arch/ia64/include/asm/page.h unsigned long l; l 144 arch/ia64/include/asm/page.h #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) l 145 arch/ia64/include/asm/page.h #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) l 147 arch/ia64/include/asm/page.h #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) l 148 arch/ia64/include/asm/page.h #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) l 438 arch/ia64/include/asm/sal.h #define SAL_LPI_PSI_INFO(l) \ l 439 arch/ia64/include/asm/sal.h ({ sal_log_processor_info_t *_l = (l); \ l 237 arch/ia64/include/asm/spinlock.h #define arch_write_lock(l) \ l 240 arch/ia64/include/asm/spinlock.h __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ l 153 arch/ia64/kernel/kprobes.c cmp_inst.l = kprobe_inst; l 880 arch/ia64/kernel/mca.c int l; l 882 arch/ia64/kernel/mca.c l = p - previous_current->comm; l 884 arch/ia64/kernel/mca.c l = strlen(previous_current->comm); l 886 arch/ia64/kernel/mca.c current->comm, l, previous_current->comm, l 998 
arch/ia64/kernel/mca.c va.l = r12; l 1001 arch/ia64/kernel/mca.c r12 = va.l; l 1003 arch/ia64/kernel/mca.c va.l = r13; l 1006 arch/ia64/kernel/mca.c r13 = va.l; l 1010 arch/ia64/kernel/mca.c va.l = ar_bspstore; l 1013 arch/ia64/kernel/mca.c ar_bspstore = va.l; l 1015 arch/ia64/kernel/mca.c va.l = ar_bsp; l 1018 arch/ia64/kernel/mca.c ar_bsp = va.l; l 878 arch/ia64/kernel/setup.c unsigned long l, levels, unique_caches; l 894 arch/ia64/kernel/setup.c for (l = 0; l < levels; ++l) { l 896 arch/ia64/kernel/setup.c status = ia64_pal_cache_config_info(l, 2, &cci); l 900 arch/ia64/kernel/setup.c __func__, l, status); l 918 arch/ia64/kernel/setup.c status = ia64_pal_cache_config_info(l, 1, &cci); l 922 arch/ia64/kernel/setup.c __func__, l, status); l 1303 arch/ia64/kernel/unaligned.c unsigned long l; l 1384 arch/ia64/kernel/unaligned.c case 0: u.l = (bundle[0] >> 5); break; l 1385 arch/ia64/kernel/unaligned.c case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break; l 1386 arch/ia64/kernel/unaligned.c case 2: u.l = (bundle[1] >> 23); break; l 1388 arch/ia64/kernel/unaligned.c opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK; l 1185 arch/ia64/kernel/unwind.c #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg) l 1186 arch/ia64/kernel/unwind.c #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg) l 33 arch/m68k/coldfire/intc-2.c #define MCFSIM_ICR_LEVEL(l) ((l)<<3) /* Level l intr */ l 20 arch/m68k/fpsp040/fpsp.h | movem.l d0-d1/a0-a1,USER_DA(a6) l 22 arch/m68k/fpsp040/fpsp.h | fmove.l fpsr/fpcr/fpiar,USER_FPSR(a6) l 50 arch/m68k/fpsp040/fpsp.h | movem.l USER_DA(a6),d0-d1/a0-a1 l 52 arch/m68k/fpsp040/fpsp.h | fmove.l USER_FPSR(a6),fpsr/fpcr/fpiar l 508 arch/m68k/include/asm/atarihw.h u_long l; l 171 arch/m68k/include/asm/math-emu.h move.l (FPS_PC+4,%sp),\dest l 175 arch/m68k/include/asm/math-emu.h move.l \src,(FPS_PC+4,%sp) l 180 arch/m68k/include/asm/math-emu.h addq.l #\s,%sp@(FPS_PC+4) l 188 arch/m68k/include/asm/math-emu.h fp_get_instr_data l,4,\dest,\label,\addr l 202 arch/m68k/include/asm/math-emu.h .Lu2\@: move.l \addr,%a0 l 219 arch/m68k/include/asm/math-emu.h .Lu3\@: move.l \addr,%a0 l 236 arch/m68k/include/asm/math-emu.h .irp m b,w,l l 249 arch/m68k/include/asm/math-emu.h move.l \arg1,-(%sp) l 260 arch/m68k/include/asm/math-emu.h movem.l %d0/%d1/%a0/%a1,-(%sp) l 276 arch/m68k/include/asm/math-emu.h movem.l (%sp)+,%d0/%d1/%a0/%a1 l 282 arch/m68k/include/asm/math-emu.h movem.l %d0/%a0,-(%sp) l 290 arch/m68k/include/asm/math-emu.h move.l (4,%a0),%d0 l 298 arch/m68k/include/asm/math-emu.h ext.l %d0 l 303 arch/m68k/include/asm/math-emu.h movem.l (%sp)+,%d0/%a0 l 32 arch/m68k/include/asm/raw_io.h #define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l)) l 34 arch/m68k/include/asm/raw_io.h #define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l)) l 74 arch/m68k/include/asm/uaccess_mm.h __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ l 142 arch/m68k/include/asm/uaccess_mm.h __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ l 147 arch/m68k/include/asm/uaccess_mm.h u64 l; \ l 167 arch/m68k/include/asm/uaccess_mm.h : "+d" (__gu_err), "=&r" (__gu_val.l), \ l 188 arch/m68k/include/asm/uaccess_mm.h #define __suffix4 l l 331 arch/m68k/include/asm/uaccess_mm.h __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4); l 334 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,); l 337 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,); l 
340 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b); l 343 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,); l 346 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b); l 349 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w); l 352 arch/m68k/include/asm/uaccess_mm.h __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l); l 43 arch/m68k/include/asm/uaccess_no.h __put_user_asm(__pu_err, __pu_val, ptr, l); \ l 83 arch/m68k/include/asm/uaccess_no.h __get_user_asm(__gu_err, __gu_val, ptr, l, "=r"); \ l 149 arch/m68k/math-emu/fp_decode.h move.l %a0,%d0 l 155 arch/m68k/math-emu/fp_decode.h ext.l %d0 l 160 arch/m68k/math-emu/fp_decode.h debug move.l "%d1,-(%sp)" l 161 arch/m68k/math-emu/fp_decode.h debug ext.l "%d1" l 163 arch/m68k/math-emu/fp_decode.h debug move.l "(%sp)+,%d1" l 164 arch/m68k/math-emu/fp_decode.h lsl.l %d1,%d0 l 250 arch/m68k/math-emu/fp_decode.h move.l %a0,%a1 | save addr l 260 arch/m68k/math-emu/fp_decode.h move.l %a1,%a0 l 277 arch/m68k/math-emu/fp_decode.h move.l %a1,%a0 l 303 arch/m68k/math-emu/fp_decode.h add.l %a1,%a0 l 314 arch/m68k/math-emu/fp_decode.h getuser.l (%a1),%a1,fp_err_ua1,%a1 l 325 arch/m68k/math-emu/fp_decode.h getuser.l (%a1),%a1,fp_err_ua1,%a1 l 342 arch/m68k/math-emu/fp_decode.h 2: move.l %a0,%a1 l 348 arch/m68k/math-emu/fp_decode.h debug ext.l "%d0" l 352 arch/m68k/math-emu/fp_decode.h add.l %d0,%a1 l 360 arch/m68k/math-emu/fp_decode.h sub.l %a1,%a1 l 376 arch/m68k/math-emu/fp_decode.h 4: add.l %a0,%a1 l 381 arch/m68k/math-emu/fp_decode.h add.l %d0,%a1 l 398 arch/m68k/math-emu/fp_decode.h 4: add.l %a0,%a1 l 401 arch/m68k/math-emu/fp_decode.h 9: move.l %a1,%a0 l 97 arch/m68k/q40/config.c int l = strlen(str); l 100 arch/m68k/q40/config.c while (l-- > 0 && _cpleft-- > 0) { l 517 arch/mips/alchemy/common/irq.c unsigned long l; l 520 arch/mips/alchemy/common/irq.c l = __raw_readl(r + AU1300_GPIC_PINCFG); l 521 arch/mips/alchemy/common/irq.c l &= ~clr; l 522 arch/mips/alchemy/common/irq.c l |= set; l 523 arch/mips/alchemy/common/irq.c __raw_writel(l, r + AU1300_GPIC_PINCFG); l 153 arch/mips/alchemy/devboards/pm.c unsigned long l; l 157 arch/mips/alchemy/devboards/pm.c tmp = kstrtoul(instr, 0, &l); l 161 arch/mips/alchemy/devboards/pm.c db1x_pm_sleep_secs = l; l 180 arch/mips/alchemy/devboards/pm.c tmp = kstrtoul(instr, 0, &l); l 184 arch/mips/alchemy/devboards/pm.c db1x_pm_wakemsk = l & 0x0000003f; l 44 arch/mips/include/asm/asmmacro-32.h l.d $f0, THREAD_FPR0(\thread) l 45 arch/mips/include/asm/asmmacro-32.h l.d $f2, THREAD_FPR2(\thread) l 46 arch/mips/include/asm/asmmacro-32.h l.d $f4, THREAD_FPR4(\thread) l 47 arch/mips/include/asm/asmmacro-32.h l.d $f6, THREAD_FPR6(\thread) l 48 arch/mips/include/asm/asmmacro-32.h l.d $f8, THREAD_FPR8(\thread) l 49 arch/mips/include/asm/asmmacro-32.h l.d $f10, THREAD_FPR10(\thread) l 50 arch/mips/include/asm/asmmacro-32.h l.d $f12, THREAD_FPR12(\thread) l 51 arch/mips/include/asm/asmmacro-32.h l.d $f14, THREAD_FPR14(\thread) l 52 arch/mips/include/asm/asmmacro-32.h l.d $f16, THREAD_FPR16(\thread) l 53 arch/mips/include/asm/asmmacro-32.h l.d $f18, THREAD_FPR18(\thread) l 54 arch/mips/include/asm/asmmacro-32.h l.d $f20, THREAD_FPR20(\thread) l 55 arch/mips/include/asm/asmmacro-32.h l.d $f22, THREAD_FPR22(\thread) l 56 arch/mips/include/asm/asmmacro-32.h l.d $f24, THREAD_FPR24(\thread) l 57 arch/mips/include/asm/asmmacro-32.h l.d $f26, 
THREAD_FPR26(\thread) l 58 arch/mips/include/asm/asmmacro-32.h l.d $f28, THREAD_FPR28(\thread) l 59 arch/mips/include/asm/asmmacro-32.h l.d $f30, THREAD_FPR30(\thread) l 451 arch/mips/include/asm/io.h BUILDIO_MEM(l, u32) l 469 arch/mips/include/asm/io.h BUILDIO_IOPORT(l, u32) l 575 arch/mips/include/asm/io.h BUILDSTRING(l, u32) l 76 arch/mips/include/asm/ip32/mace.h volatile unsigned int l; l 684 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0) l 685 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0) l 686 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0) l 688 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2) l 689 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3) l 690 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0) l 692 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2) l 693 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3) l 694 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4) l 695 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5) l 696 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6) l 697 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7) l 701 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0) l 705 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0) l 710 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0) l 712 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1) l 721 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2) l 722 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0) l 723 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0) l 724 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2) l 725 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3) l 726 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4) l 727 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5) l 728 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6) l 729 arch/mips/include/asm/kvm_host.h __BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7) l 735 arch/mips/include/asm/kvm_host.h __BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1) l 19 arch/mips/include/asm/local.h #define local_read(l) atomic_long_read(&(l)->a) l 20 arch/mips/include/asm/local.h #define local_set(l, i) atomic_long_set(&(l)->a, (i)) l 22 arch/mips/include/asm/local.h #define local_add(i, l) atomic_long_add((i), (&(l)->a)) l 23 arch/mips/include/asm/local.h #define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) l 24 arch/mips/include/asm/local.h #define local_inc(l) atomic_long_inc(&(l)->a) l 25 arch/mips/include/asm/local.h #define local_dec(l) atomic_long_dec(&(l)->a) l 30 arch/mips/include/asm/local.h static __inline__ long local_add_return(long i, local_t * l) l 46 arch/mips/include/asm/local.h : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) l 47 arch/mips/include/asm/local.h : "Ir" (i), "m" (l->a.counter) l 61 
arch/mips/include/asm/local.h : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) l 62 arch/mips/include/asm/local.h : "Ir" (i), "m" (l->a.counter) l 68 arch/mips/include/asm/local.h result = l->a.counter; l 70 arch/mips/include/asm/local.h l->a.counter = result; l 77 arch/mips/include/asm/local.h static __inline__ long local_sub_return(long i, local_t * l) l 93 arch/mips/include/asm/local.h : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) l 94 arch/mips/include/asm/local.h : "Ir" (i), "m" (l->a.counter) l 108 arch/mips/include/asm/local.h : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) l 109 arch/mips/include/asm/local.h : "Ir" (i), "m" (l->a.counter) l 115 arch/mips/include/asm/local.h result = l->a.counter; l 117 arch/mips/include/asm/local.h l->a.counter = result; l 124 arch/mips/include/asm/local.h #define local_cmpxchg(l, o, n) \ l 125 arch/mips/include/asm/local.h ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) l 126 arch/mips/include/asm/local.h #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n))) l 137 arch/mips/include/asm/local.h #define local_add_unless(l, a, u) \ l 140 arch/mips/include/asm/local.h c = local_read(l); \ l 141 arch/mips/include/asm/local.h while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \ l 145 arch/mips/include/asm/local.h #define local_inc_not_zero(l) local_add_unless((l), 1, 0) l 147 arch/mips/include/asm/local.h #define local_dec_return(l) local_sub_return(1, (l)) l 148 arch/mips/include/asm/local.h #define local_inc_return(l) local_add_return(1, (l)) l 159 arch/mips/include/asm/local.h #define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0) l 169 arch/mips/include/asm/local.h #define local_inc_and_test(l) (local_inc_return(l) == 0) l 179 arch/mips/include/asm/local.h #define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) l 190 arch/mips/include/asm/local.h #define local_add_negative(i, l) (local_add_return(i, (l)) < 0) l 197 arch/mips/include/asm/local.h #define __local_inc(l) ((l)->a.counter++) l 198 arch/mips/include/asm/local.h #define __local_dec(l) ((l)->a.counter++) l 199 arch/mips/include/asm/local.h #define __local_add(i, l) ((l)->a.counter+=(i)) l 200 arch/mips/include/asm/local.h #define __local_sub(i, l) ((l)->a.counter-=(i)) l 92 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h #define MSI_LINK_ADDR(n, l) (MSI_ADDR_BASE + \ l 93 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h (PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ) l 95 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h #define MSIX_LINK_ADDR(n, l) (MSIX_ADDR_BASE + \ l 96 arch/mips/include/asm/netlogic/xlp-hal/pcibus.h (PCIE_NLINKS * (n) + (l)) * MSI_ADDR_SZ) l 208 arch/mips/include/asm/pci/bridge.h u32 l[0x1000 / 4]; l 213 arch/mips/include/asm/pci/bridge.h u32 l[0x100 / 4]; l 222 arch/mips/include/asm/pci/bridge.h u32 l[0x1000 / 4]; l 232 arch/mips/include/asm/pci/bridge.h u32 l[8 / 4]; l 248 arch/mips/include/asm/pci/bridge.h u32 l[0x100000 / 4]; l 262 arch/mips/include/asm/pci/bridge.h u32 l[0x400000 / 4]; /* read-only */ l 406 arch/mips/include/asm/sn/klconfig.h #define IS_MIO_PRESENT(l) ((l->brd_type == KLTYPE_BASEIO) && \ l 407 arch/mips/include/asm/sn/klconfig.h (l->brd_flags & SECOND_NIC_PRESENT)) l 408 arch/mips/include/asm/sn/klconfig.h #define IS_MIO_IOC3(l, n) (IS_MIO_PRESENT(l) && (n > 2)) l 18 arch/mips/include/asm/tlbex.h void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, l 23 arch/mips/include/asm/tlbex.h void build_tlb_write_entry(u32 **p, struct uasm_label **l, l 118 arch/mips/include/asm/txx9/tx4939.h 
struct tx4939_le_reg l; l 130 arch/mips/include/asm/txx9/tx4939.h struct tx4939_le_reg l; l 342 arch/mips/include/asm/uaccess.h unsigned long long l; \ l 361 arch/mips/include/asm/uaccess.h : "=r" (__gu_err), "=&r" (__gu_tmp.l) \ l 998 arch/mips/include/uapi/asm/inst.h __BITFIELD_FIELD(unsigned int l : 1, l 380 arch/mips/kernel/branch.c if (inst.rr.l) { l 50 arch/mips/kernel/cevt-r4k.c unsigned int cnt, i, j, k, l; l 71 arch/mips/kernel/cevt-r4k.c l = min_t(unsigned int, l 73 arch/mips/kernel/cevt-r4k.c for (; l > k; --l) l 74 arch/mips/kernel/cevt-r4k.c buf1[l] = buf1[l - 1]; l 85 arch/mips/kernel/cevt-r4k.c l = min_t(unsigned int, l 87 arch/mips/kernel/cevt-r4k.c for (; l > k; --l) l 88 arch/mips/kernel/cevt-r4k.c buf2[l] = buf2[l - 1]; l 107 arch/mips/kernel/module.c static void free_relocation_chain(struct mips_hi16 *l) l 111 arch/mips/kernel/module.c while (l) { l 112 arch/mips/kernel/module.c next = l->next; l 113 arch/mips/kernel/module.c kfree(l); l 114 arch/mips/kernel/module.c l = next; l 122 arch/mips/kernel/module.c struct mips_hi16 *l; l 134 arch/mips/kernel/module.c l = me->arch.r_mips_hi16_list; l 135 arch/mips/kernel/module.c while (l != NULL) { l 142 arch/mips/kernel/module.c if (v != l->value) l 151 arch/mips/kernel/module.c insn = *l->addr; l 162 arch/mips/kernel/module.c *l->addr = insn; l 164 arch/mips/kernel/module.c next = l->next; l 165 arch/mips/kernel/module.c kfree(l); l 166 arch/mips/kernel/module.c l = next; l 182 arch/mips/kernel/module.c free_relocation_chain(l); l 344 arch/mips/kernel/pm-cps.c struct uasm_label *l = labels; l 401 arch/mips/kernel/pm-cps.c uasm_build_label(&l, p, lbl_incready); l 426 arch/mips/kernel/pm-cps.c uasm_build_label(&l, p, lbl_poll_cont); l 454 arch/mips/kernel/pm-cps.c uasm_build_label(&l, p, lbl_secondary_hang); l 465 arch/mips/kernel/pm-cps.c uasm_build_label(&l, p, lbl_disable_coherence); l 468 arch/mips/kernel/pm-cps.c cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, l 472 arch/mips/kernel/pm-cps.c cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, l 499 arch/mips/kernel/pm-cps.c err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], l 524 arch/mips/kernel/pm-cps.c uasm_build_label(&l, p, lbl_hang); l 548 arch/mips/kernel/pm-cps.c cps_gen_set_top_bit(&p, &l, &r, r_nc_count, l 556 arch/mips/kernel/pm-cps.c uasm_build_label(&l, p, lbl_secondary_cont); l 580 arch/mips/kernel/pm-cps.c uasm_build_label(&l, p, lbl_decready); l 598 arch/mips/kernel/pm-cps.c cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); l 608 arch/mips/kernel/pm-cps.c uasm_build_label(&l, p, lbl_secondary_cont); l 621 arch/mips/kernel/pm-cps.c BUG_ON((l - labels) > ARRAY_SIZE(labels)); l 338 arch/mips/kernel/vpe.c struct mips_hi16 *l, *next; l 345 arch/mips/kernel/vpe.c l = mips_hi16_list; l 346 arch/mips/kernel/vpe.c while (l != NULL) { l 352 arch/mips/kernel/vpe.c if (v != l->value) { l 363 arch/mips/kernel/vpe.c insn = *l->addr; l 374 arch/mips/kernel/vpe.c *l->addr = insn; l 376 arch/mips/kernel/vpe.c next = l->next; l 377 arch/mips/kernel/vpe.c kfree(l); l 378 arch/mips/kernel/vpe.c l = next; l 394 arch/mips/kernel/vpe.c while (l != NULL) { l 395 arch/mips/kernel/vpe.c next = l->next; l 396 arch/mips/kernel/vpe.c kfree(l); l 397 arch/mips/kernel/vpe.c l = next; l 298 arch/mips/kvm/entry.c struct uasm_label __maybe_unused *l = labels; l 380 arch/mips/kvm/entry.c uasm_l_kernel_asid(&l, p); l 472 arch/mips/kvm/entry.c struct uasm_label *l = labels; l 505 arch/mips/kvm/entry.c build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ l 514 
arch/mips/kvm/entry.c build_tlb_write_entry(&p, &l, &r, tlb_random); l 547 arch/mips/kvm/entry.c struct uasm_label *l = labels; l 567 arch/mips/kvm/entry.c uasm_l_exit_common(&l, handler); l 590 arch/mips/kvm/entry.c struct uasm_label *l = labels; l 691 arch/mips/kvm/entry.c uasm_l_fpu_1(&l, p); l 707 arch/mips/kvm/entry.c uasm_l_msa_1(&l, p); l 822 arch/mips/kvm/entry.c struct uasm_label *l = labels; l 851 arch/mips/kvm/entry.c uasm_l_return_to_host(&l, p); l 26 arch/mips/loongson64/common/cmdline.c long l; l 35 arch/mips/loongson64/common/cmdline.c l = (long)_prom_argv[i]; l 36 arch/mips/loongson64/common/cmdline.c if (strlen(arcs_cmdline) + strlen(((char *)l) + 1) l 39 arch/mips/loongson64/common/cmdline.c strcat(arcs_cmdline, ((char *)l)); l 48 arch/mips/loongson64/common/env.c long l; l 53 arch/mips/loongson64/common/env.c l = (long)*_prom_envp; l 54 arch/mips/loongson64/common/env.c while (l != 0) { l 55 arch/mips/loongson64/common/env.c parse_even_earlier(cpu_clock_freq, "cpuclock", l); l 56 arch/mips/loongson64/common/env.c parse_even_earlier(memsize, "memsize", l); l 57 arch/mips/loongson64/common/env.c parse_even_earlier(highmemsize, "highmemsize", l); l 59 arch/mips/loongson64/common/env.c l = (long)*_prom_envp; l 1670 arch/mips/math-emu/cp1emu.c s64 l; l 2000 arch/mips/math-emu/cp1emu.c rv.l = ieee754sp_tlong(fs); l 2023 arch/mips/math-emu/cp1emu.c rv.l = ieee754sp_tlong(fs); l 2138 arch/mips/math-emu/cp1emu.c if (rv.l & 0x1) l 2139 arch/mips/math-emu/cp1emu.c rv.l = 0; l 2150 arch/mips/math-emu/cp1emu.c if (rv.l & 0x1) l 2153 arch/mips/math-emu/cp1emu.c rv.l = 0; l 2204 arch/mips/math-emu/cp1emu.c rv.l = ieee754dp_2008class(fs); l 2351 arch/mips/math-emu/cp1emu.c rv.l = ieee754dp_tlong(fs); l 2374 arch/mips/math-emu/cp1emu.c rv.l = ieee754dp_tlong(fs); l 2687 arch/mips/math-emu/cp1emu.c rv.l = 0; l 2697 arch/mips/math-emu/cp1emu.c rv.l = -1LL; /* true, all 1s */ l 2712 arch/mips/math-emu/cp1emu.c rv.l = -1LL; /* true, all 1s */ l 2775 arch/mips/math-emu/cp1emu.c DITOREG(rv.l, MIPSInst_FD(ir)); l 278 arch/mips/mm/page.c struct uasm_label *l = labels; l 315 arch/mips/mm/page.c uasm_l_clear_pref(&l, buf); l 333 arch/mips/mm/page.c uasm_l_clear_nopref(&l, buf); l 425 arch/mips/mm/page.c struct uasm_label *l = labels; l 472 arch/mips/mm/page.c uasm_l_copy_pref_both(&l, buf); l 520 arch/mips/mm/page.c uasm_l_copy_pref_store(&l, buf); l 562 arch/mips/mm/page.c uasm_l_copy_nopref(&l, buf); l 212 arch/mips/mm/tlbex.c static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance) l 216 arch/mips/mm/tlbex.c uasm_build_label(l, *p, label_tlbw_hazard_0 + instance); l 498 arch/mips/mm/tlbex.c void build_tlb_write_entry(u32 **p, struct uasm_label **l, l 529 arch/mips/mm/tlbex.c uasm_bgezl_label(l, p, hazard_instance); l 699 arch/mips/mm/tlbex.c static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l, l 710 arch/mips/mm/tlbex.c build_tlb_write_entry(p, l, r, wmode); l 763 arch/mips/mm/tlbex.c struct uasm_label **l, l 781 arch/mips/mm/tlbex.c build_tlb_write_entry(p, l, r, tlb_indexed); l 786 arch/mips/mm/tlbex.c build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0); l 792 arch/mips/mm/tlbex.c build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); l 801 arch/mips/mm/tlbex.c void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, l 863 arch/mips/mm/tlbex.c uasm_l_vmalloc_done(l, *p); l 892 arch/mips/mm/tlbex.c build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, l 902 arch/mips/mm/tlbex.c uasm_l_vmalloc(l, *p); l 928 
arch/mips/mm/tlbex.c uasm_l_large_segbits_fault(l, *p); l 1106 arch/mips/mm/tlbex.c build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, l 1178 arch/mips/mm/tlbex.c uasm_l_vmalloc_done(l, *p); l 1273 arch/mips/mm/tlbex.c build_tlb_write_entry(p, l, r, tlb_random); l 1274 arch/mips/mm/tlbex.c uasm_l_leave(l, *p); l 1277 arch/mips/mm/tlbex.c build_tlb_write_entry(p, l, r, tlb_random); l 1278 arch/mips/mm/tlbex.c uasm_l_leave(l, *p); l 1282 arch/mips/mm/tlbex.c build_tlb_write_entry(p, l, r, tlb_random); l 1283 arch/mips/mm/tlbex.c uasm_l_leave(l, *p); l 1303 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 1316 arch/mips/mm/tlbex.c htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, l 1342 arch/mips/mm/tlbex.c build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ l 1353 arch/mips/mm/tlbex.c build_tlb_write_entry(&p, &l, &r, tlb_random); l 1354 arch/mips/mm/tlbex.c uasm_l_leave(&l, p); l 1358 arch/mips/mm/tlbex.c uasm_l_tlb_huge_update(&l, p); l 1362 arch/mips/mm/tlbex.c build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, l 1367 arch/mips/mm/tlbex.c build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); l 1450 arch/mips/mm/tlbex.c uasm_l_split(&l, final_handler); l 1531 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 1546 arch/mips/mm/tlbex.c uasm_l_vmalloc(&l, p); l 1574 arch/mips/mm/tlbex.c uasm_l_large_segbits_fault(&l, p); l 1603 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 1618 arch/mips/mm/tlbex.c uasm_l_tlbl_goaround1(&l, p); l 1895 arch/mips/mm/tlbex.c build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, l 1906 arch/mips/mm/tlbex.c uasm_l_r3000_write_probe_fail(l, *p); l 1935 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 1946 arch/mips/mm/tlbex.c build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); l 1948 arch/mips/mm/tlbex.c uasm_l_nopage_tlbl(&l, p); l 1965 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 1976 arch/mips/mm/tlbex.c build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); l 1978 arch/mips/mm/tlbex.c uasm_l_nopage_tlbs(&l, p); l 1995 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 2008 arch/mips/mm/tlbex.c uasm_l_nopage_tlbm(&l, p); l 2047 arch/mips/mm/tlbex.c build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, l 2053 arch/mips/mm/tlbex.c build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */ l 2074 arch/mips/mm/tlbex.c uasm_l_smp_pgtable_change(l, *p); l 2091 arch/mips/mm/tlbex.c build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, l 2098 arch/mips/mm/tlbex.c build_tlb_write_entry(p, l, r, tlb_indexed); l 2099 arch/mips/mm/tlbex.c uasm_l_leave(l, *p); l 2104 arch/mips/mm/tlbex.c build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill); l 2111 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 2133 arch/mips/mm/tlbex.c wr = build_r4000_tlbchange_handler_head(&p, &l, &r); l 2192 arch/mips/mm/tlbex.c uasm_l_tlbl_goaround1(&l, p); l 2198 arch/mips/mm/tlbex.c uasm_l_tlbl_goaround1(&l, p); l 2201 arch/mips/mm/tlbex.c build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); l 2208 arch/mips/mm/tlbex.c uasm_l_tlb_huge_update(&l, p); l 2278 arch/mips/mm/tlbex.c uasm_l_tlbl_goaround2(&l, p); l 2281 arch/mips/mm/tlbex.c build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); l 2284 arch/mips/mm/tlbex.c uasm_l_nopage_tlbl(&l, p); l 2311 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 2319 arch/mips/mm/tlbex.c wr = build_r4000_tlbchange_handler_head(&p, &l, &r); l 2324 arch/mips/mm/tlbex.c build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); l 2331 
arch/mips/mm/tlbex.c uasm_l_tlb_huge_update(&l, p); l 2337 arch/mips/mm/tlbex.c build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); l 2340 arch/mips/mm/tlbex.c uasm_l_nopage_tlbs(&l, p); l 2367 arch/mips/mm/tlbex.c struct uasm_label *l = labels; l 2375 arch/mips/mm/tlbex.c wr = build_r4000_tlbchange_handler_head(&p, &l, &r); l 2381 arch/mips/mm/tlbex.c build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2); l 2388 arch/mips/mm/tlbex.c uasm_l_tlb_huge_update(&l, p); l 2394 arch/mips/mm/tlbex.c build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0); l 2397 arch/mips/mm/tlbex.c uasm_l_nopage_tlbm(&l, p); l 505 arch/mips/mm/uasm.c struct uasm_label *l; l 508 arch/mips/mm/uasm.c for (l = lab; l->lab != UASM_LABEL_INVALID; l++) l 509 arch/mips/mm/uasm.c if (rel->lab == l->lab) l 510 arch/mips/mm/uasm.c __resolve_relocs(rel, l); l 56 arch/mips/pci/ops-mace.c *val = mace->pci.config_data.l; l 88 arch/mips/pci/ops-mace.c mace->pci.config_data.l = val; l 113 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; l 196 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; l 237 arch/mips/pci/pci-xtalk-bridge.c addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; l 49 arch/parisc/include/asm/assembly.h #define BL b,l l 36 arch/parisc/include/asm/atomic.h #define _atomic_spin_lock_irqsave(l,f) do { \ l 37 arch/parisc/include/asm/atomic.h arch_spinlock_t *s = ATOMIC_HASH(l); \ l 42 arch/parisc/include/asm/atomic.h #define _atomic_spin_unlock_irqrestore(l,f) do { \ l 43 arch/parisc/include/asm/atomic.h arch_spinlock_t *s = ATOMIC_HASH(l); \ l 50 arch/parisc/include/asm/atomic.h # define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) l 51 arch/parisc/include/asm/atomic.h # define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) l 205 arch/parisc/include/asm/io.h static inline void writel(unsigned int l, volatile void __iomem *addr) l 207 arch/parisc/include/asm/io.h __raw_writel((__u32 __force) cpu_to_le32(l), addr); l 229 arch/parisc/include/asm/io.h #define writel_relaxed(l, addr) writel(l, addr) l 76 arch/parisc/include/asm/psw.h unsigned int l:1; l 122 arch/parisc/include/asm/uaccess.h unsigned long long l; \ l 132 arch/parisc/include/asm/uaccess.h : "=&r"(__gu_tmp.l), "=r"(__gu_err) \ l 80 arch/parisc/kernel/pci.c PCI_PORT_IN(l, 32) l 95 arch/parisc/kernel/pci.c PCI_PORT_OUT(l, 32) l 61 arch/parisc/kernel/ptrace.c pa_psw(task)->l = 0; l 109 arch/parisc/kernel/ptrace.c pa_psw(task)->l = 0; l 121 arch/parisc/kernel/ptrace.c pa_psw(task)->l = 0; l 169 arch/parisc/lib/io.c unsigned int l = 0, l2; l 183 arch/parisc/lib/io.c l = cpu_to_le16(inw(port)) << 16; l 184 arch/parisc/lib/io.c l |= cpu_to_le16(inw(port)); l 185 arch/parisc/lib/io.c *(unsigned int *)p = l; l 200 arch/parisc/lib/io.c l = cpu_to_le16(inw(port)) << 16; l 201 arch/parisc/lib/io.c l |= cpu_to_le16(inw(port)); l 202 arch/parisc/lib/io.c *(unsigned int *)p = l; l 216 arch/parisc/lib/io.c l = cpu_to_le16(inw(port)); l 217 arch/parisc/lib/io.c *p = l >> 8; l 222 arch/parisc/lib/io.c *(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8); l 224 arch/parisc/lib/io.c l = l2; l 226 arch/parisc/lib/io.c *p = l & 0xff; l 241 arch/parisc/lib/io.c unsigned int l = 0, l2; l 262 arch/parisc/lib/io.c l = cpu_to_le32(inl(port)); l 263 arch/parisc/lib/io.c *(unsigned short *)p = l >> 16; l 269 arch/parisc/lib/io.c *(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16); l 271 arch/parisc/lib/io.c l = l2; l 273 arch/parisc/lib/io.c 
*(unsigned short *)p = l & 0xffff; l 278 arch/parisc/lib/io.c l = cpu_to_le32(inl(port)); l 279 arch/parisc/lib/io.c *(unsigned char *)p = l >> 24; l 281 arch/parisc/lib/io.c *(unsigned short *)p = (l >> 8) & 0xffff; l 286 arch/parisc/lib/io.c *(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8); l 288 arch/parisc/lib/io.c l = l2; l 290 arch/parisc/lib/io.c *p = l & 0xff; l 295 arch/parisc/lib/io.c l = cpu_to_le32(inl(port)); l 296 arch/parisc/lib/io.c *p = l >> 24; l 301 arch/parisc/lib/io.c *(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24; l 303 arch/parisc/lib/io.c l = l2; l 305 arch/parisc/lib/io.c *(unsigned short *)p = (l >> 8) & 0xffff; l 307 arch/parisc/lib/io.c *p = l & 0xff; l 339 arch/parisc/lib/io.c unsigned int l = 0, l2; l 352 arch/parisc/lib/io.c l = *(unsigned int *)p; l 354 arch/parisc/lib/io.c outw(le16_to_cpu(l >> 16), port); l 355 arch/parisc/lib/io.c outw(le16_to_cpu(l & 0xffff), port); l 370 arch/parisc/lib/io.c l = *(unsigned int *)p; l 372 arch/parisc/lib/io.c outw(le16_to_cpu(l >> 16), port); l 373 arch/parisc/lib/io.c outw(le16_to_cpu(l & 0xffff), port); l 384 arch/parisc/lib/io.c l = *p << 8; l 392 arch/parisc/lib/io.c outw(le16_to_cpu(l | l2 >> 8), port); l 393 arch/parisc/lib/io.c l = l2 << 8; l 396 arch/parisc/lib/io.c outw (le16_to_cpu(l | l2>>8), port); l 411 arch/parisc/lib/io.c unsigned int l = 0, l2; l 432 arch/parisc/lib/io.c l = *(unsigned short *)p; l 439 arch/parisc/lib/io.c outl (le32_to_cpu(l << 16 | l2 >> 16), port); l 440 arch/parisc/lib/io.c l = l2; l 443 arch/parisc/lib/io.c outl (le32_to_cpu(l << 16 | l2), port); l 448 arch/parisc/lib/io.c l = *p << 24; l 450 arch/parisc/lib/io.c l |= *(unsigned short *)p << 8; l 457 arch/parisc/lib/io.c outl (le32_to_cpu(l | l2 >> 24), port); l 458 arch/parisc/lib/io.c l = l2 << 8; l 461 arch/parisc/lib/io.c outl (le32_to_cpu(l | l2), port); l 466 arch/parisc/lib/io.c l = *p << 24; l 473 arch/parisc/lib/io.c outl (le32_to_cpu(l | l2 >> 8), port); l 474 arch/parisc/lib/io.c l = l2 << 24; l 479 arch/parisc/lib/io.c outl (le32_to_cpu(l | l2), port); l 276 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l, l 285 arch/powerpc/include/asm/book3s/64/mmu-hash.h lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); l 294 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) l 296 arch/powerpc/include/asm/book3s/64/mmu-hash.h return __hpte_page_size(h, l, 0); l 299 arch/powerpc/include/asm/book3s/64/mmu-hash.h static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l) l 301 arch/powerpc/include/asm/book3s/64/mmu-hash.h return __hpte_page_size(h, l, 1); l 205 arch/powerpc/include/asm/exception-64e.h ori r3,r3,vector_offset@l; \ l 220 arch/powerpc/include/asm/kvm_book3s_64.h static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l) l 226 arch/powerpc/include/asm/kvm_book3s_64.h lphi = (l >> 16) & 0xf; l 227 arch/powerpc/include/asm/kvm_book3s_64.h switch ((l >> 12) & 0xf) { l 250 arch/powerpc/include/asm/kvm_book3s_64.h static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l) l 252 arch/powerpc/include/asm/kvm_book3s_64.h return kvmppc_hpte_page_shifts(h, l) & 0xff; l 255 arch/powerpc/include/asm/kvm_book3s_64.h static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l) l 257 arch/powerpc/include/asm/kvm_book3s_64.h int tmp = kvmppc_hpte_page_shifts(h, l); l 20 arch/powerpc/include/asm/local.h static 
__inline__ long local_read(local_t *l) l 22 arch/powerpc/include/asm/local.h return READ_ONCE(l->v); l 25 arch/powerpc/include/asm/local.h static __inline__ void local_set(local_t *l, long i) l 27 arch/powerpc/include/asm/local.h WRITE_ONCE(l->v, i); l 31 arch/powerpc/include/asm/local.h static __inline__ void local_##op(long i, local_t *l) \ l 36 arch/powerpc/include/asm/local.h l->v c_op i; \ l 41 arch/powerpc/include/asm/local.h static __inline__ long local_##op##_return(long a, local_t *l) \ l 47 arch/powerpc/include/asm/local.h t = (l->v c_op a); \ l 60 arch/powerpc/include/asm/local.h #define local_add_negative(a, l) (local_add_return((a), (l)) < 0) l 61 arch/powerpc/include/asm/local.h #define local_inc_return(l) local_add_return(1LL, l) l 62 arch/powerpc/include/asm/local.h #define local_inc(l) local_inc_return(l) l 72 arch/powerpc/include/asm/local.h #define local_inc_and_test(l) (local_inc_return(l) == 0) l 74 arch/powerpc/include/asm/local.h #define local_dec_return(l) local_sub_return(1LL, l) l 75 arch/powerpc/include/asm/local.h #define local_dec(l) local_dec_return(l) l 76 arch/powerpc/include/asm/local.h #define local_sub_and_test(a, l) (local_sub_return((a), (l)) == 0) l 77 arch/powerpc/include/asm/local.h #define local_dec_and_test(l) (local_dec_return((l)) == 0) l 79 arch/powerpc/include/asm/local.h static __inline__ long local_cmpxchg(local_t *l, long o, long n) l 85 arch/powerpc/include/asm/local.h t = l->v; l 87 arch/powerpc/include/asm/local.h l->v = n; l 93 arch/powerpc/include/asm/local.h static __inline__ long local_xchg(local_t *l, long n) l 99 arch/powerpc/include/asm/local.h t = l->v; l 100 arch/powerpc/include/asm/local.h l->v = n; l 115 arch/powerpc/include/asm/local.h static __inline__ int local_add_unless(local_t *l, long a, long u) l 121 arch/powerpc/include/asm/local.h if (l->v != u) { l 122 arch/powerpc/include/asm/local.h l->v += a; l 130 arch/powerpc/include/asm/local.h #define local_inc_not_zero(l) local_add_unless((l), 1, 0) l 137 arch/powerpc/include/asm/local.h #define __local_inc(l) ((l)->v++) l 138 arch/powerpc/include/asm/local.h #define __local_dec(l) ((l)->v++) l 139 arch/powerpc/include/asm/local.h #define __local_add(i,l) ((l)->v+=(i)) l 140 arch/powerpc/include/asm/local.h #define __local_sub(i,l) ((l)->v-=(i)) l 127 arch/powerpc/include/asm/lppaca.h static inline bool lppaca_shared_proc(struct lppaca *l) l 131 arch/powerpc/include/asm/lppaca.h return !!(l->__old_status & LPPACA_OLD_SHARED_PROC); l 435 arch/powerpc/include/asm/ppc-opcode.h #define PPC_DARN(t, l) stringify_in_c(.long PPC_INST_DARN | \ l 437 arch/powerpc/include/asm/ppc-opcode.h (((l) & 0x3) << 16)) l 212 arch/powerpc/include/asm/ppc_asm.h addi r2,r2,(.TOC.-0b)@l; \ l 312 arch/powerpc/include/asm/ppc_asm.h addi reg,reg,(name - 0b)@l; l 324 arch/powerpc/include/asm/ppc_asm.h ori \r, \r, (\x)@l l 327 arch/powerpc/include/asm/ppc_asm.h li \r, (\x)@l l 339 arch/powerpc/include/asm/ppc_asm.h ori \r, \r, (\x)@l l 354 arch/powerpc/include/asm/ppc_asm.h ori reg, reg, (expr)@l; \ l 372 arch/powerpc/include/asm/ppc_asm.h addi reg,reg,(expr)@l; l 377 arch/powerpc/include/asm/ppc_asm.h #define ADDROFF(name) name@l l 830 arch/powerpc/include/asm/ppc_asm.h ori reg,reg,BUCSR_INIT@l; \ l 1345 arch/powerpc/include/asm/reg.h #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ l 420 arch/powerpc/kernel/btext.c int l, bits; l 424 arch/powerpc/kernel/btext.c for (l = 0; l < 16; ++l) l 441 arch/powerpc/kernel/btext.c int l, bits; l 446 arch/powerpc/kernel/btext.c for (l = 0; l < 16; 
++l) l 459 arch/powerpc/kernel/btext.c int l, bits; l 464 arch/powerpc/kernel/btext.c for (l = 0; l < 16; ++l) l 173 arch/powerpc/kernel/eeh.c int n = 0, l = 0; l 233 arch/powerpc/kernel/eeh.c l = scnprintf(buffer, sizeof(buffer), l 237 arch/powerpc/kernel/eeh.c l += scnprintf(buffer+l, sizeof(buffer)-l, l 260 arch/powerpc/kernel/eeh.c l = scnprintf(buffer, sizeof(buffer), l 264 arch/powerpc/kernel/eeh.c l += scnprintf(buffer+l, sizeof(buffer)-l, l 57 arch/powerpc/kernel/head_32.h addi r10,r10,STACK_FRAME_REGS_MARKER@l l 87 arch/powerpc/kernel/head_32.h addi r10,r10,STACK_FRAME_REGS_MARKER@l l 112 arch/powerpc/kernel/head_32.h addi r11,r11,global_dbcr0@l l 123 arch/powerpc/kernel/head_32.h ori r11, r11, transfer_to_syscall@l l 16 arch/powerpc/kernel/head_booke.h li r26,vector_label@l; \ l 26 arch/powerpc/kernel/head_booke.h addi reg,reg,val@l l 81 arch/powerpc/kernel/head_booke.h addi r10, r10, STACK_FRAME_REGS_MARKER@l; \ l 123 arch/powerpc/kernel/head_booke.h addi r12, r12, STACK_FRAME_REGS_MARKER@l l 146 arch/powerpc/kernel/head_booke.h addi r11,r11,global_dbcr0@l l 161 arch/powerpc/kernel/head_booke.h ori r11, r11, transfer_to_syscall@l l 169 arch/powerpc/kernel/head_booke.h ori r10, r10, MSR_KERNEL@l l 173 arch/powerpc/kernel/head_booke.h ori r10, r10, (MSR_KERNEL | MSR_EE)@l l 212 arch/powerpc/kernel/head_booke.h lwz r8,level##_STACK_BASE@l(r8); \ l 217 arch/powerpc/kernel/head_booke.h lwz r8,level##_STACK_BASE@l(r8); \ l 332 arch/powerpc/kernel/head_booke.h ori r10,r10,msr@l; \ l 375 arch/powerpc/kernel/head_booke.h ori r10,r10,interrupt_base@l; \ l 380 arch/powerpc/kernel/head_booke.h ori r10,r10,interrupt_end@l; \ l 428 arch/powerpc/kernel/head_booke.h ori r10,r10,interrupt_base@l; \ l 433 arch/powerpc/kernel/head_booke.h ori r10,r10,interrupt_end@l; \ l 763 arch/powerpc/kernel/prom_init.c int l = 0; l 768 arch/powerpc/kernel/prom_init.c l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1); l 769 arch/powerpc/kernel/prom_init.c if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && (l <= 0 || p[0] == '\0')) /* dbl check */ l 2523 arch/powerpc/kernel/prom_init.c int l, room, has_phandle = 0; l 2532 arch/powerpc/kernel/prom_init.c l = call_prom("package-to-path", 3, 1, node, namep, room); l 2533 arch/powerpc/kernel/prom_init.c if (l >= 0) { l 2535 arch/powerpc/kernel/prom_init.c if (l >= room) { l 2536 arch/powerpc/kernel/prom_init.c if (l >= *mem_end - *mem_start) l 2537 arch/powerpc/kernel/prom_init.c namep = make_room(mem_start, mem_end, l+1, 1); l 2538 arch/powerpc/kernel/prom_init.c call_prom("package-to-path", 3, 1, node, namep, l); l 2540 arch/powerpc/kernel/prom_init.c namep[l] = '\0'; l 2546 arch/powerpc/kernel/prom_init.c for (lp = p = namep, ep = namep + l; p < ep; p++) { l 2585 arch/powerpc/kernel/prom_init.c l = call_prom("getproplen", 2, 1, node, pname); l 2588 arch/powerpc/kernel/prom_init.c if (l == PROM_ERROR) l 2593 arch/powerpc/kernel/prom_init.c dt_push_token(l, mem_start, mem_end); l 2597 arch/powerpc/kernel/prom_init.c valp = make_room(mem_start, mem_end, l, 4); l 2598 arch/powerpc/kernel/prom_init.c call_prom("getprop", 4, 1, node, pname, valp, l); l 815 arch/powerpc/kvm/book3s_hv_rm_xics.c unsigned long l; l 820 arch/powerpc/kvm/book3s_hv_rm_xics.c l = (unsigned long)raddr; l 822 arch/powerpc/kvm/book3s_hv_rm_xics.c if (get_region_id(l) == VMALLOC_REGION_ID) { l 823 arch/powerpc/kvm/book3s_hv_rm_xics.c l = vmalloc_to_phys(raddr); l 824 arch/powerpc/kvm/book3s_hv_rm_xics.c raddr = (unsigned int *)l; l 31 arch/powerpc/lib/rheap.c struct list_head *l) l 35 
arch/powerpc/lib/rheap.c pp = (unsigned long *)&l->next; l 39 arch/powerpc/lib/rheap.c pp = (unsigned long *)&l->prev; l 159 arch/powerpc/lib/rheap.c struct list_head *l; l 172 arch/powerpc/lib/rheap.c list_for_each(l, &info->free_list) { l 173 arch/powerpc/lib/rheap.c blk = list_entry(l, rh_block_t, list); l 235 arch/powerpc/lib/rheap.c struct list_head *l; l 238 arch/powerpc/lib/rheap.c list_for_each(l, &info->taken_list) { l 239 arch/powerpc/lib/rheap.c blk = list_entry(l, rh_block_t, list); l 372 arch/powerpc/lib/rheap.c struct list_head *l; l 395 arch/powerpc/lib/rheap.c list_for_each(l, &info->free_list) { l 396 arch/powerpc/lib/rheap.c blk = list_entry(l, rh_block_t, list); l 444 arch/powerpc/lib/rheap.c struct list_head *l; l 460 arch/powerpc/lib/rheap.c list_for_each(l, &info->free_list) { l 461 arch/powerpc/lib/rheap.c blk = list_entry(l, rh_block_t, list); l 529 arch/powerpc/lib/rheap.c struct list_head *l; l 552 arch/powerpc/lib/rheap.c list_for_each(l, &info->free_list) { l 553 arch/powerpc/lib/rheap.c blk = list_entry(l, rh_block_t, list); l 615 arch/powerpc/lib/rheap.c struct list_head *l; l 620 arch/powerpc/lib/rheap.c list_for_each(l, &info->taken_list) { l 621 arch/powerpc/lib/rheap.c blk2 = list_entry(l, rh_block_t, list); l 644 arch/powerpc/lib/rheap.c struct list_head *l; l 664 arch/powerpc/lib/rheap.c list_for_each(l, h) { l 665 arch/powerpc/lib/rheap.c blk = list_entry(l, rh_block_t, list); l 682 arch/powerpc/lib/rheap.c struct list_head *l; l 687 arch/powerpc/lib/rheap.c list_for_each(l, &info->taken_list) { l 688 arch/powerpc/lib/rheap.c blk2 = list_entry(l, rh_block_t, list); l 467 arch/powerpc/lib/sstep.c unsigned long l[2]; l 488 arch/powerpc/lib/sstep.c u.l[0] = u.i; l 490 arch/powerpc/lib/sstep.c u.l[0] = u.u; l 495 arch/powerpc/lib/sstep.c current->thread.TS_FPR(rn) = u.l[0]; l 502 arch/powerpc/lib/sstep.c current->thread.TS_FPR(rn) = u.l[1]; l 517 arch/powerpc/lib/sstep.c unsigned long l[2]; l 529 arch/powerpc/lib/sstep.c u.l[0] = current->thread.TS_FPR(rn); l 534 arch/powerpc/lib/sstep.c u.u = u.l[0]; l 541 arch/powerpc/lib/sstep.c u.l[1] = current->thread.TS_FPR(rn); l 211 arch/powerpc/platforms/powermac/bootx_init.c unsigned int l = strlen(s) + 1; l 212 arch/powerpc/platforms/powermac/bootx_init.c memcpy((void *)*mem_end, s, l); l 213 arch/powerpc/platforms/powermac/bootx_init.c bootx_dt_strend = *mem_end = *mem_end + l; l 281 arch/powerpc/platforms/powermac/bootx_init.c int l; l 289 arch/powerpc/platforms/powermac/bootx_init.c l = strlen(namep); l 297 arch/powerpc/platforms/powermac/bootx_init.c memcpy((void *)*mem_end, namep, l + 1); l 299 arch/powerpc/platforms/powermac/bootx_init.c for (lp = p = namep, ep = namep + l; p < ep; p++) { l 124 arch/powerpc/platforms/powermac/setup.c int l = strlen(pp) + 1; l 126 arch/powerpc/platforms/powermac/setup.c plen -= l; l 127 arch/powerpc/platforms/powermac/setup.c pp += l; l 533 arch/powerpc/platforms/ps3/repository.c u64 l; l 536 arch/powerpc/platforms/ps3/repository.c repo->dev_index, res_index, &t, &a, &l); l 546 arch/powerpc/platforms/ps3/repository.c *len = l; l 190 arch/powerpc/platforms/pseries/iommu.c long l, limit; l 233 arch/powerpc/platforms/pseries/iommu.c for (l = 0; l < limit; l++) { l 234 arch/powerpc/platforms/pseries/iommu.c tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT); l 403 arch/powerpc/platforms/pseries/iommu.c long l, limit; l 455 arch/powerpc/platforms/pseries/iommu.c for (l = 0; l < limit; l++) { l 456 arch/powerpc/platforms/pseries/iommu.c tcep[l] = 
cpu_to_be64(proto_tce | next); l 61 arch/powerpc/sysdev/dart_iommu.c unsigned long l = 0; l 81 arch/powerpc/sysdev/dart_iommu.c l = 0; l 86 arch/powerpc/sysdev/dart_iommu.c while ((DART_IN(DART_CNTL) & inv_bit) && l < (1L << limit)) l 87 arch/powerpc/sysdev/dart_iommu.c l++; l 88 arch/powerpc/sysdev/dart_iommu.c if (l == (1L << limit)) { l 106 arch/powerpc/sysdev/dart_iommu.c unsigned int l, limit; l 117 arch/powerpc/sysdev/dart_iommu.c l = 0; l 118 arch/powerpc/sysdev/dart_iommu.c while ((DART_IN(DART_CNTL) & DART_CNTL_U4_IONE) && l < (1L << limit)) { l 120 arch/powerpc/sysdev/dart_iommu.c l++; l 123 arch/powerpc/sysdev/dart_iommu.c if (l == (1L << limit)) { l 179 arch/powerpc/sysdev/dart_iommu.c long l; l 188 arch/powerpc/sysdev/dart_iommu.c l = npages; l 189 arch/powerpc/sysdev/dart_iommu.c while (l--) { l 565 arch/powerpc/sysdev/mpic.c u32 l = readl(devbase + PCI_VENDOR_ID); l 568 arch/powerpc/sysdev/mpic.c DBG("devfn %x, l: %x\n", devfn, l); l 571 arch/powerpc/sysdev/mpic.c if (l == 0xffffffff || l == 0x00000000 || l 572 arch/powerpc/sysdev/mpic.c l == 0x0000ffff || l == 0xffff0000) l 579 arch/powerpc/sysdev/mpic.c mpic_scan_ht_pic(mpic, devbase, devfn, l); l 2311 arch/powerpc/xmon/ppc-opc.c #define OPL(x,l) (OP (x) | ((((unsigned long)(l)) & 1) << 21)) l 2724 arch/powerpc/xmon/ppc-opc.c #define XOPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21)) l 2727 arch/powerpc/xmon/ppc-opc.c #define XOPL2(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 3) << 21)) l 2730 arch/powerpc/xmon/ppc-opc.c #define XRCL(op, xop, l, rc) (XRC ((op), (xop), (rc)) | ((((unsigned long)(l)) & 1) << 21)) l 2757 arch/powerpc/xmon/ppc-opc.c #define XSYNC(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 3) << 21)) l 247 arch/riscv/include/asm/io.h __io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr)) l 254 arch/riscv/include/asm/io.h __io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr)) l 261 arch/riscv/include/asm/io.h __io_writes_outs(writes, u32, l, __io_bw(), __io_aw()) l 268 arch/riscv/include/asm/io.h __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw()) l 28 arch/s390/include/uapi/asm/runtime_instr.h __u32 l : 1; l 113 arch/s390/mm/extmem.c struct list_head *l; l 118 arch/s390/mm/extmem.c list_for_each (l, &dcss_list) { l 119 arch/s390/mm/extmem.c tmp = list_entry (l, struct dcss_segment, list); l 273 arch/s390/mm/extmem.c struct list_head *l; l 277 arch/s390/mm/extmem.c list_for_each(l, &dcss_list) { l 278 arch/s390/mm/extmem.c tmp = list_entry(l, struct dcss_segment, list); l 254 arch/s390/mm/page-states.c struct list_head *l; l 265 arch/s390/mm/page-states.c list_for_each(l, &zone->free_area[order].free_list[t]) { l 266 arch/s390/mm/page-states.c page = list_entry(l, struct page, lru); l 21 arch/sh/boards/mach-highlander/psw.c unsigned int l, mask; l 24 arch/sh/boards/mach-highlander/psw.c l = __raw_readw(PA_DBSW); l 32 arch/sh/boards/mach-highlander/psw.c mask = l & 0x70; l 44 arch/sh/boards/mach-highlander/psw.c l |= (0x7 << 12); l 45 arch/sh/boards/mach-highlander/psw.c __raw_writew(l, PA_DBSW); l 60 arch/sh/include/asm/io.h #define readsb(p,d,l) __raw_readsb(p,d,l) l 61 arch/sh/include/asm/io.h #define readsw(p,d,l) __raw_readsw(p,d,l) l 62 arch/sh/include/asm/io.h #define readsl(p,d,l) __raw_readsl(p,d,l) l 64 arch/sh/include/asm/io.h #define writesb(p,d,l) __raw_writesb(p,d,l) l 65 arch/sh/include/asm/io.h #define writesw(p,d,l) __raw_writesw(p,d,l) l 66 arch/sh/include/asm/io.h #define writesl(p,d,l) __raw_writesl(p,d,l) l 87 arch/sh/include/asm/io.h 
__BUILD_UNCACHED_IO(l, u32) l 122 arch/sh/include/asm/io.h __BUILD_MEMORY_STRING(__raw_, l, u32) l 191 arch/sh/include/asm/io.h BUILDIO_IOPORT(l, u32) l 220 arch/sh/include/asm/io.h __BUILD_IOPORT_STRING(l, u32) l 11 arch/sh/include/asm/romimage-macros.h mov.l 1f, r1 l 12 arch/sh/include/asm/romimage-macros.h mov.l 2f, r0 l 13 arch/sh/include/asm/romimage-macros.h mov.l r0, @r1 l 24 arch/sh/include/asm/romimage-macros.h mov.l 1f, r1 l 25 arch/sh/include/asm/romimage-macros.h mov.l 2f, r0 l 37 arch/sh/include/asm/romimage-macros.h mov.l 1f, r1 l 38 arch/sh/include/asm/romimage-macros.h mov.l 2f, r0 l 50 arch/sh/include/asm/romimage-macros.h mov.l 2f, r3 l 65 arch/sh/include/asm/romimage-macros.h mov.l 1f, r1 l 66 arch/sh/include/asm/romimage-macros.h mov.l @r1, r0 l 13 arch/sh/include/mach-ecovec24/mach/romimage.h mov.l 1f, r0 l 13 arch/sh/include/mach-kfr2r09/mach/romimage.h mov.l 1f, r0 l 49 arch/sparc/include/asm/checksum_32.h register int l asm("g1") = len; l 54 arch/sparc/include/asm/checksum_32.h : "=&r" (ret), "=&r" (d), "=&r" (l) l 55 arch/sparc/include/asm/checksum_32.h : "0" (ret), "1" (d), "2" (l), "r" (sum) l 68 arch/sparc/include/asm/checksum_32.h register int l asm("g1") = len; l 79 arch/sparc/include/asm/checksum_32.h : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s) l 80 arch/sparc/include/asm/checksum_32.h : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err) l 96 arch/sparc/include/asm/checksum_32.h register int l asm("g1") = len; l 107 arch/sparc/include/asm/checksum_32.h : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s) l 108 arch/sparc/include/asm/checksum_32.h : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err) l 83 arch/sparc/include/asm/io_32.h static inline void sbus_writel(u32 l, volatile void __iomem *addr) l 85 arch/sparc/include/asm/io_32.h *(__force volatile u32 *)addr = l; l 85 arch/sparc/include/asm/io_64.h static inline void __raw_writel(u32 l, const volatile void __iomem *addr) l 89 arch/sparc/include/asm/io_64.h : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); l 177 arch/sparc/include/asm/io_64.h static inline void writel(u32 l, volatile void __iomem *addr) l 181 arch/sparc/include/asm/io_64.h : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) l 226 arch/sparc/include/asm/io_64.h static inline void outl(u32 l, unsigned long addr) l 228 arch/sparc/include/asm/io_64.h writel(l, (volatile void __iomem *)addr); l 275 arch/sparc/include/asm/io_64.h #define ioread8_rep(p,d,l) readsb(p,d,l) l 276 arch/sparc/include/asm/io_64.h #define ioread16_rep(p,d,l) readsw(p,d,l) l 277 arch/sparc/include/asm/io_64.h #define ioread32_rep(p,d,l) readsl(p,d,l) l 278 arch/sparc/include/asm/io_64.h #define iowrite8_rep(p,d,l) writesb(p,d,l) l 279 arch/sparc/include/asm/io_64.h #define iowrite16_rep(p,d,l) writesw(p,d,l) l 280 arch/sparc/include/asm/io_64.h #define iowrite32_rep(p,d,l) writesl(p,d,l) l 320 arch/sparc/include/asm/io_64.h static inline void sbus_writel(u32 l, volatile void __iomem *addr) l 322 arch/sparc/include/asm/io_64.h __raw_writel(l, addr); l 23 arch/sparc/include/asm/prom.h #define of_compat_cmp(s1, s2, l) strncmp((s1), (s2), (l)) l 86 arch/sparc/include/asm/upa.h static inline void _upa_writel(unsigned int l, unsigned long addr) l 90 arch/sparc/include/asm/upa.h : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); l 244 arch/sparc/kernel/btext.c int l, bits; l 248 arch/sparc/kernel/btext.c for (l = 0; l < 16; ++l) l 265 arch/sparc/kernel/btext.c int l, bits; l 270 arch/sparc/kernel/btext.c for (l = 0; l < 16; ++l) l 283 arch/sparc/kernel/btext.c int l, bits; l 288 
arch/sparc/kernel/btext.c for (l = 0; l < 16; ++l) l 103 arch/sparc/kernel/prom_common.c int l; l 107 arch/sparc/kernel/prom_common.c l = strlen(list) + 1; l 108 arch/sparc/kernel/prom_common.c list += l; l 109 arch/sparc/kernel/prom_common.c len -= l; l 37 arch/sparc/lib/PeeCeeI.c u32 l, l2; l 53 arch/sparc/lib/PeeCeeI.c l = (*(u16 *)src) << 16; l 54 arch/sparc/lib/PeeCeeI.c l |= *(u16 *)(src + sizeof(u16)); l 55 arch/sparc/lib/PeeCeeI.c __raw_writel(l, addr); l 61 arch/sparc/lib/PeeCeeI.c l = (*(u8 *)src) << 24; l 62 arch/sparc/lib/PeeCeeI.c l |= (*(u16 *)(src + sizeof(u8))) << 8; l 66 arch/sparc/lib/PeeCeeI.c l |= (l2 >> 24); l 67 arch/sparc/lib/PeeCeeI.c __raw_writel(l, addr); l 68 arch/sparc/lib/PeeCeeI.c l = l2 << 8; l 74 arch/sparc/lib/PeeCeeI.c l = (*(u8 *)src) << 24; l 78 arch/sparc/lib/PeeCeeI.c l |= (l2 >> 8); l 79 arch/sparc/lib/PeeCeeI.c __raw_writel(l, addr); l 80 arch/sparc/lib/PeeCeeI.c l = l2 << 24; l 154 arch/sparc/lib/PeeCeeI.c u32 l = 0, l2, *pi; l 162 arch/sparc/lib/PeeCeeI.c l = __raw_readl(addr); l 163 arch/sparc/lib/PeeCeeI.c *ps++ = l; l 167 arch/sparc/lib/PeeCeeI.c *pi++ = (l << 16) | (l2 >> 16); l 168 arch/sparc/lib/PeeCeeI.c l = l2; l 171 arch/sparc/lib/PeeCeeI.c *ps = l; l 177 arch/sparc/lib/PeeCeeI.c l = __raw_readl(addr); l 178 arch/sparc/lib/PeeCeeI.c *pb++ = l >> 24; l 180 arch/sparc/lib/PeeCeeI.c *ps++ = ((l >> 8) & 0xffff); l 184 arch/sparc/lib/PeeCeeI.c *pi++ = (l << 24) | (l2 >> 8); l 185 arch/sparc/lib/PeeCeeI.c l = l2; l 188 arch/sparc/lib/PeeCeeI.c *pb = l; l 194 arch/sparc/lib/PeeCeeI.c l = __raw_readl(addr); l 195 arch/sparc/lib/PeeCeeI.c *pb++ = l >> 24; l 199 arch/sparc/lib/PeeCeeI.c *pi++ = (l << 8) | (l2 >> 24); l 200 arch/sparc/lib/PeeCeeI.c l = l2; l 203 arch/sparc/lib/PeeCeeI.c *ps++ = ((l >> 8) & 0xffff); l 205 arch/sparc/lib/PeeCeeI.c *pb = l; l 851 arch/x86/boot/compressed/eboot.c desc->l = 0; l 874 arch/x86/boot/compressed/eboot.c desc->l = 1; l 877 arch/x86/boot/compressed/eboot.c desc->l = 0; l 894 arch/x86/boot/compressed/eboot.c desc->l = 0; l 911 arch/x86/boot/compressed/eboot.c desc->l = 0; l 18 arch/x86/boot/string.h #define memcpy(d,s,l) __builtin_memcpy(d,s,l) l 19 arch/x86/boot/string.h #define memset(d,c,l) __builtin_memset(d,c,l) l 774 arch/x86/crypto/camellia_glue.c #define ROLDQ(l, r, bits) ({ \ l 775 arch/x86/crypto/camellia_glue.c u64 t = l; \ l 776 arch/x86/crypto/camellia_glue.c l = (l << bits) | (r >> (64 - bits)); \ l 616 arch/x86/events/perf_event.h u64 (*limit_period)(struct perf_event *event, u64 l); l 25 arch/x86/include/asm/asm.h #define __ASM_SIZE(inst, ...) 
__ASM_SEL(inst##l##__VA_ARGS__, \ l 40 arch/x86/include/asm/desc.h desc->l = 0; l 20 arch/x86/include/asm/desc_defs.h u16 limit1: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; l 35 arch/x86/include/asm/desc_defs.h .l = (flags >> 13) & 0x01, \ l 341 arch/x86/include/asm/io.h BUILDIO(l, , int) l 1043 arch/x86/include/asm/kvm_host.h void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); l 1390 arch/x86/include/asm/kvm_host.h void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); l 16 arch/x86/include/asm/local.h #define local_read(l) atomic_long_read(&(l)->a) l 17 arch/x86/include/asm/local.h #define local_set(l, i) atomic_long_set(&(l)->a, (i)) l 19 arch/x86/include/asm/local.h static inline void local_inc(local_t *l) l 22 arch/x86/include/asm/local.h : "+m" (l->a.counter)); l 25 arch/x86/include/asm/local.h static inline void local_dec(local_t *l) l 28 arch/x86/include/asm/local.h : "+m" (l->a.counter)); l 31 arch/x86/include/asm/local.h static inline void local_add(long i, local_t *l) l 34 arch/x86/include/asm/local.h : "+m" (l->a.counter) l 38 arch/x86/include/asm/local.h static inline void local_sub(long i, local_t *l) l 41 arch/x86/include/asm/local.h : "+m" (l->a.counter) l 54 arch/x86/include/asm/local.h static inline bool local_sub_and_test(long i, local_t *l) l 56 arch/x86/include/asm/local.h return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i); l 67 arch/x86/include/asm/local.h static inline bool local_dec_and_test(local_t *l) l 69 arch/x86/include/asm/local.h return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e); l 80 arch/x86/include/asm/local.h static inline bool local_inc_and_test(local_t *l) l 82 arch/x86/include/asm/local.h return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e); l 94 arch/x86/include/asm/local.h static inline bool local_add_negative(long i, local_t *l) l 96 arch/x86/include/asm/local.h return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i); l 106 arch/x86/include/asm/local.h static inline long local_add_return(long i, local_t *l) l 110 arch/x86/include/asm/local.h : "+r" (i), "+m" (l->a.counter) l 115 arch/x86/include/asm/local.h static inline long local_sub_return(long i, local_t *l) l 117 arch/x86/include/asm/local.h return local_add_return(-i, l); l 120 arch/x86/include/asm/local.h #define local_inc_return(l) (local_add_return(1, l)) l 121 arch/x86/include/asm/local.h #define local_dec_return(l) (local_sub_return(1, l)) l 123 arch/x86/include/asm/local.h #define local_cmpxchg(l, o, n) \ l 124 arch/x86/include/asm/local.h (cmpxchg_local(&((l)->a.counter), (o), (n))) l 126 arch/x86/include/asm/local.h #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) l 137 arch/x86/include/asm/local.h #define local_add_unless(l, a, u) \ l 140 arch/x86/include/asm/local.h c = local_read((l)); \ l 144 arch/x86/include/asm/local.h old = local_cmpxchg((l), c, c + (a)); \ l 151 arch/x86/include/asm/local.h #define local_inc_not_zero(l) local_add_unless((l), 1, 0) l 157 arch/x86/include/asm/local.h #define __local_inc(l) local_inc(l) l 158 arch/x86/include/asm/local.h #define __local_dec(l) local_dec(l) l 159 arch/x86/include/asm/local.h #define __local_add(i, l) local_add((i), (l)) l 160 arch/x86/include/asm/local.h #define __local_sub(i, l) local_sub((i), (l)) l 17 arch/x86/include/asm/msr.h u32 l; l 339 arch/x86/include/asm/msr.h int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); l 340 arch/x86/include/asm/msr.h int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); l 345 arch/x86/include/asm/msr.h int rdmsr_safe_on_cpu(unsigned int 
cpu, u32 msr_no, u32 *l, u32 *h); l 346 arch/x86/include/asm/msr.h int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); l 352 arch/x86/include/asm/msr.h static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) l 354 arch/x86/include/asm/msr.h rdmsr(msr_no, *l, *h); l 357 arch/x86/include/asm/msr.h static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) l 359 arch/x86/include/asm/msr.h wrmsr(msr_no, l, h); l 375 arch/x86/include/asm/msr.h rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h)); l 380 arch/x86/include/asm/msr.h wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h); l 383 arch/x86/include/asm/msr.h u32 *l, u32 *h) l 385 arch/x86/include/asm/msr.h return rdmsr_safe(msr_no, l, h); l 387 arch/x86/include/asm/msr.h static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) l 389 arch/x86/include/asm/msr.h return wrmsr_safe(msr_no, l, h); l 355 arch/x86/include/asm/xen/interface.h } l; l 136 arch/x86/include/uapi/asm/kvm.h __u8 present, dpl, db, s, l, g, avl; l 1275 arch/x86/kernel/apic/apic.c unsigned int l, h; l 1277 arch/x86/kernel/apic/apic.c rdmsr(MSR_IA32_APICBASE, l, h); l 1278 arch/x86/kernel/apic/apic.c l &= ~MSR_IA32_APICBASE_ENABLE; l 1279 arch/x86/kernel/apic/apic.c wrmsr(MSR_IA32_APICBASE, l, h); l 1991 arch/x86/kernel/apic/apic.c u32 features, h, l; l 2007 arch/x86/kernel/apic/apic.c rdmsr(MSR_IA32_APICBASE, l, h); l 2008 arch/x86/kernel/apic/apic.c if (l & MSR_IA32_APICBASE_ENABLE) l 2009 arch/x86/kernel/apic/apic.c mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; l 2018 arch/x86/kernel/apic/apic.c u32 h, l; l 2029 arch/x86/kernel/apic/apic.c rdmsr(MSR_IA32_APICBASE, l, h); l 2030 arch/x86/kernel/apic/apic.c if (!(l & MSR_IA32_APICBASE_ENABLE)) { l 2032 arch/x86/kernel/apic/apic.c l &= ~MSR_IA32_APICBASE_BASE; l 2033 arch/x86/kernel/apic/apic.c l |= MSR_IA32_APICBASE_ENABLE | addr; l 2034 arch/x86/kernel/apic/apic.c wrmsr(MSR_IA32_APICBASE, l, h); l 2656 arch/x86/kernel/apic/apic.c unsigned int l, h; l 2684 arch/x86/kernel/apic/apic.c rdmsr(MSR_IA32_APICBASE, l, h); l 2685 arch/x86/kernel/apic/apic.c l &= ~MSR_IA32_APICBASE_BASE; l 2686 arch/x86/kernel/apic/apic.c l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; l 2687 arch/x86/kernel/apic/apic.c wrmsr(MSR_IA32_APICBASE, l, h); l 893 arch/x86/kernel/apic/x2apic_uv_x.c int f, l; l 896 arch/x86/kernel/apic/x2apic_uv_x.c f = l = i; l 900 arch/x86/kernel/apic/x2apic_uv_x.c l = li; l 903 arch/x86/kernel/apic/x2apic_uv_x.c addr2 = (base << shift) + (l + 1) * (1ULL << m_io); l 905 arch/x86/kernel/apic/x2apic_uv_x.c if (max_io < l) l 906 arch/x86/kernel/apic/x2apic_uv_x.c max_io = l; l 119 arch/x86/kernel/cpu/amd.c u32 l, h; l 166 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K6_WHCR, l, h); l 167 arch/x86/kernel/cpu/amd.c if ((l&0x0000FFFF) == 0) { l 169 arch/x86/kernel/cpu/amd.c l = (1<<0)|((mbytes/4)<<1); l 172 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K6_WHCR, l, h); l 187 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K6_WHCR, l, h); l 188 arch/x86/kernel/cpu/amd.c if ((l&0xFFFF0000) == 0) { l 190 arch/x86/kernel/cpu/amd.c l = ((mbytes>>2)<<22)|(1<<16); l 193 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K6_WHCR, l, h); l 213 arch/x86/kernel/cpu/amd.c u32 l, h; l 234 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K7_CLK_CTL, l, h); l 235 arch/x86/kernel/cpu/amd.c if ((l & 0xfff00000) != 0x20000000) { l 237 arch/x86/kernel/cpu/amd.c l, ((l & 0x000fffff)|0x20000000)); l 238 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); l 317 arch/x86/kernel/cpu/mce/core.c struct mce_evt_llist *l; 
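
The x86 entries above (arch/x86/include/asm/msr.h, arch/x86/lib/msr-smp.c, arch/x86/kernel/apic/apic.c) consistently treat an MSR as a split low/high 32-bit pair l/h and do a read-modify-write on the low word before writing the register back, for example clearing MSR_IA32_APICBASE_ENABLE. The stand-alone sketch below only illustrates that lo/hi convention; fake_rdmsr, fake_wrmsr and the stored value are stand-ins invented here, not the kernel's rdmsr()/wrmsr() primitives.

/*
 * Stand-alone illustration of the l/h (low/high 32-bit) MSR convention
 * used in the msr.h and apic.c lines above.  The "MSR" is just a local
 * 64-bit variable; only the bit layout mirrors IA32_APICBASE, whose
 * global-enable flag is bit 11.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_APICBASE_ENABLE (1u << 11)	/* xAPIC global enable */

static uint64_t fake_msr = 0xfee00900;		/* made-up register value */

static void fake_rdmsr(uint32_t *l, uint32_t *h)
{
	*l = (uint32_t)fake_msr;		/* low 32 bits  -> l */
	*h = (uint32_t)(fake_msr >> 32);	/* high 32 bits -> h */
}

static void fake_wrmsr(uint32_t l, uint32_t h)
{
	fake_msr = ((uint64_t)h << 32) | l;
}

int main(void)
{
	uint32_t l, h;

	/* Same read-modify-write shape as the apic.c lines quoted above. */
	fake_rdmsr(&l, &h);
	l &= ~MSR_IA32_APICBASE_ENABLE;		/* clear the enable bit in the low word */
	fake_wrmsr(l, h);

	printf("msr now %#llx\n", (unsigned long long)fake_msr);
	return 0;
}
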
l 336 arch/x86/kernel/cpu/mce/core.c llist_for_each_entry(l, pending, llnode) { l 337 arch/x86/kernel/cpu/mce/core.c struct mce *m = &l->mce; l 345 arch/x86/kernel/cpu/mce/core.c llist_for_each_entry(l, pending, llnode) { l 346 arch/x86/kernel/cpu/mce/core.c struct mce *m = &l->mce; l 32 arch/x86/kernel/cpu/mce/genpool.c static bool is_duplicate_mce_record(struct mce_evt_llist *t, struct mce_evt_llist *l) l 39 arch/x86/kernel/cpu/mce/genpool.c llist_for_each_entry(node, &l->llnode, llnode) { l 302 arch/x86/kernel/cpu/mce/inject.c u32 l, h; l 305 arch/x86/kernel/cpu/mce/inject.c err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h); l 311 arch/x86/kernel/cpu/mce/inject.c enable ? (l |= BIT(18)) : (l &= ~BIT(18)); l 313 arch/x86/kernel/cpu/mce/inject.c err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h); l 48 arch/x86/kernel/cpu/mce/p5.c u32 l, h; l 63 arch/x86/kernel/cpu/mce/p5.c rdmsr(MSR_IA32_P5_MC_ADDR, l, h); l 64 arch/x86/kernel/cpu/mce/p5.c rdmsr(MSR_IA32_P5_MC_TYPE, l, h); l 432 arch/x86/kernel/cpu/mce/therm_throt.c u32 l, h; l 442 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 459 arch/x86/kernel/cpu/mce/therm_throt.c if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { l 468 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_THERM2_CTL, l, h); l 469 arch/x86/kernel/cpu/mce/therm_throt.c if (l & MSR_THERM2_CTL_TM_SELECT) l 471 arch/x86/kernel/cpu/mce/therm_throt.c } else if (l & MSR_IA32_MISC_ENABLE_TM2) l 479 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); l 482 arch/x86/kernel/cpu/mce/therm_throt.c (l | (THERM_INT_LOW_ENABLE l 486 arch/x86/kernel/cpu/mce/therm_throt.c l | (THERM_INT_LOW_ENABLE l 490 arch/x86/kernel/cpu/mce/therm_throt.c l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h); l 493 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); l 496 arch/x86/kernel/cpu/mce/therm_throt.c (l | (PACKAGE_THERM_INT_LOW_ENABLE l 501 arch/x86/kernel/cpu/mce/therm_throt.c l | (PACKAGE_THERM_INT_LOW_ENABLE l 506 arch/x86/kernel/cpu/mce/therm_throt.c l | (PACKAGE_THERM_INT_LOW_ENABLE l 512 arch/x86/kernel/cpu/mce/therm_throt.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 513 arch/x86/kernel/cpu/mce/therm_throt.c wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); l 516 arch/x86/kernel/cpu/mce/therm_throt.c l = apic_read(APIC_LVTTHMR); l 517 arch/x86/kernel/cpu/mce/therm_throt.c apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); l 831 arch/x86/kernel/cpu/mtrr/cleanup.c u32 l, h; l 839 arch/x86/kernel/cpu/mtrr/cleanup.c if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) l 845 arch/x86/kernel/cpu/mtrr/cleanup.c if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) == l 202 arch/x86/kernel/cpu/resctrl/core.c u32 l, h, max_cbm = BIT_MASK(20) - 1; l 207 arch/x86/kernel/cpu/resctrl/core.c rdmsr(MSR_IA32_L3_CBM_BASE, l, h); l 210 arch/x86/kernel/cpu/resctrl/core.c if (l != max_cbm) l 446 arch/x86/kernel/cpu/resctrl/core.c struct list_head *l; l 451 arch/x86/kernel/cpu/resctrl/core.c list_for_each(l, &r->domains) { l 452 arch/x86/kernel/cpu/resctrl/core.c d = list_entry(l, struct rdt_domain, list); l 462 arch/x86/kernel/cpu/resctrl/core.c *pos = l; l 141 arch/x86/kernel/hpet.c u32 i, id, period, cfg, status, channels, l, h; l 153 arch/x86/kernel/hpet.c l = hpet_readl(HPET_COUNTER); l 155 arch/x86/kernel/hpet.c pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h); l 160 arch/x86/kernel/hpet.c l = hpet_readl(HPET_Tn_CFG(i)); l 162 arch/x86/kernel/hpet.c pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h); l 164 arch/x86/kernel/hpet.c l = 
hpet_readl(HPET_Tn_CMP(i)); l 166 arch/x86/kernel/hpet.c pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h); l 168 arch/x86/kernel/hpet.c l = hpet_readl(HPET_Tn_ROUTE(i)); l 170 arch/x86/kernel/hpet.c pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h); l 854 arch/x86/kernel/ptrace.c #define R32(l,q) \ l 855 arch/x86/kernel/ptrace.c case offsetof(struct user32, regs.l): \ l 928 arch/x86/kernel/ptrace.c #define R32(l,q) \ l 929 arch/x86/kernel/ptrace.c case offsetof(struct user32, regs.l): \ l 216 arch/x86/kernel/tls.c info->lm = desc->l; l 360 arch/x86/kvm/emulate.c FOP1E(op##l, eax) \ l 391 arch/x86/kvm/emulate.c FOP2E(op##l, eax, edx) \ l 400 arch/x86/kvm/emulate.c FOP2E(op##l, eax, edx) \ l 409 arch/x86/kvm/emulate.c FOP2E(op##l, eax, cl) \ l 418 arch/x86/kvm/emulate.c FOP2E(op##l, edx, eax) \ l 432 arch/x86/kvm/emulate.c FOP3E(op##l, eax, edx, cl) \ l 809 arch/x86/kvm/emulate.c if (cs_desc->l) { l 1747 arch/x86/kvm/emulate.c if (seg_desc.d && seg_desc.l) { l 2395 arch/x86/kvm/emulate.c desc->l = (flags >> 21) & 1; l 2698 arch/x86/kvm/emulate.c cs->l = 0; /* will be adjusted later */ l 2717 arch/x86/kvm/emulate.c ss->l = 0; l 2814 arch/x86/kvm/emulate.c cs.l = 1; l 2881 arch/x86/kvm/emulate.c cs.l = 1; l 2938 arch/x86/kvm/emulate.c cs.l = 1; l 2471 arch/x86/kvm/svm.c var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; l 2667 arch/x86/kvm/svm.c s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; l 3892 arch/x86/kvm/vmx/nested.c seg.l = 1; l 145 arch/x86/kvm/vmx/ops.h _ASM_EXTABLE(1b, %l[fault]) \ l 160 arch/x86/kvm/vmx/ops.h _ASM_EXTABLE(1b, %l[fault]) \ l 2700 arch/x86/kvm/vmx/vmx.c var.l = 0; l 3110 arch/x86/kvm/vmx/vmx.c var->l = (ar >> 13) & 1; l 3150 arch/x86/kvm/vmx/vmx.c ar |= (var->l & 1) << 13; l 3198 arch/x86/kvm/vmx/vmx.c static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) l 3203 arch/x86/kvm/vmx/vmx.c *l = (ar >> 13) & 1; l 6148 arch/x86/kvm/x86.c desc->l = var.l; l 6174 arch/x86/kvm/x86.c var.l = desc->l; l 7706 arch/x86/kvm/x86.c flags |= seg->l << 21; l 7913 arch/x86/kvm/x86.c cs.l = ds.l = 0; l 8712 arch/x86/kvm/x86.c void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) l 8718 arch/x86/kvm/x86.c *l = cs.l; l 8857 arch/x86/kvm/x86.c if (sregs->efer & EFER_LMA || sregs->cs.l) l 768 arch/x86/lib/insn-eval.c switch ((desc.l << 1) | desc.d) { l 19 arch/x86/lib/msr-smp.c rdmsr(rv->msr_no, reg->l, reg->h); l 33 arch/x86/lib/msr-smp.c wrmsr(rv->msr_no, reg->l, reg->h); l 36 arch/x86/lib/msr-smp.c int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) l 45 arch/x86/lib/msr-smp.c *l = rv.reg.l; l 67 arch/x86/lib/msr-smp.c int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) l 75 arch/x86/lib/msr-smp.c rv.reg.l = l; l 158 arch/x86/lib/msr-smp.c rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h); l 166 arch/x86/lib/msr-smp.c rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h); l 169 arch/x86/lib/msr-smp.c int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) l 187 arch/x86/lib/msr-smp.c *l = rv.msr.reg.l; l 194 arch/x86/lib/msr-smp.c int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) l 202 arch/x86/lib/msr-smp.c rv.reg.l = l; l 206 arch/x86/math-emu/fpu_emu.h asmlinkage unsigned FPU_shrx(void *l, unsigned x); l 20 arch/x86/math-emu/reg_constant.c #define MAKE_REG(s, e, l, h) { l, h, \ l 303 arch/x86/math-emu/reg_ld_str.c long long l = 0; l 310 arch/x86/math-emu/reg_ld_str.c l *= 10; l 314 arch/x86/math-emu/reg_ld_str.c l += bcd >> 4; l 315 arch/x86/math-emu/reg_ld_str.c l *= 10; l 
316 arch/x86/math-emu/reg_ld_str.c l += bcd & 0x0f; l 324 arch/x86/math-emu/reg_ld_str.c if (l == 0) { l 329 arch/x86/math-emu/reg_ld_str.c significand(st0_ptr) = l; l 381 arch/x86/math-emu/reg_ld_str.c unsigned long l[2]; l 387 arch/x86/math-emu/reg_ld_str.c l[0] = 0; l 388 arch/x86/math-emu/reg_ld_str.c l[1] = 0; l 417 arch/x86/math-emu/reg_ld_str.c l[0] = tmp.sigl; l 418 arch/x86/math-emu/reg_ld_str.c l[1] = tmp.sigh; l 467 arch/x86/math-emu/reg_ld_str.c l[0] = (tmp.sigl >> 11) | (tmp.sigh << 21); l 468 arch/x86/math-emu/reg_ld_str.c l[1] = ((tmp.sigh >> 11) & 0xfffff); l 481 arch/x86/math-emu/reg_ld_str.c l[1] = 0x7ff00000; /* Set to + INF */ l 490 arch/x86/math-emu/reg_ld_str.c l[1] |= (((exp + DOUBLE_Ebias) & 0x7ff) << 20); l 509 arch/x86/math-emu/reg_ld_str.c l[1] = 0x7ff00000; l 515 arch/x86/math-emu/reg_ld_str.c l[0] = l 518 arch/x86/math-emu/reg_ld_str.c l[1] = ((st0_ptr->sigh >> 11) & 0xfffff); l 524 arch/x86/math-emu/reg_ld_str.c l[1] |= (0x40000000 >> 11); l 526 arch/x86/math-emu/reg_ld_str.c l[1] |= 0x7ff00000; l 532 arch/x86/math-emu/reg_ld_str.c l[1] = 0xfff80000; l 552 arch/x86/math-emu/reg_ld_str.c l[1] |= 0x80000000; l 556 arch/x86/math-emu/reg_ld_str.c FPU_put_user(l[0], (unsigned long __user *)dfloat); l 557 arch/x86/math-emu/reg_ld_str.c FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat); l 76 arch/x86/mm/kmmio.c unsigned int l; l 77 arch/x86/mm/kmmio.c pte_t *pte = lookup_address(addr, &l); l 81 arch/x86/mm/kmmio.c addr &= page_level_mask(l); l 113 arch/x86/mm/kmmio.c unsigned int l; l 114 arch/x86/mm/kmmio.c pte_t *pte = lookup_address(addr, &l); l 118 arch/x86/mm/kmmio.c addr &= page_level_mask(l); l 236 arch/x86/mm/kmmio.c unsigned int l; l 237 arch/x86/mm/kmmio.c pte_t *pte = lookup_address(addr, &l); l 240 arch/x86/mm/kmmio.c page_base &= page_level_mask(l); l 445 arch/x86/mm/kmmio.c unsigned int l; l 454 arch/x86/mm/kmmio.c pte = lookup_address(addr, &l); l 465 arch/x86/mm/kmmio.c size += page_level_size(l); l 541 arch/x86/mm/kmmio.c unsigned int l; l 544 arch/x86/mm/kmmio.c pte = lookup_address(addr, &l); l 551 arch/x86/mm/kmmio.c size += page_level_size(l); l 41 arch/x86/pci/legacy.c u32 l; l 47 arch/x86/pci/legacy.c if (!raw_pci_read(0, busn, devfn, PCI_VENDOR_ID, 2, &l) && l 48 arch/x86/pci/legacy.c l != 0x0000 && l != 0xffff) { l 49 arch/x86/pci/legacy.c DBG("Found device at %02x:%02x [%04x]\n", busn, devfn, l); l 269 arch/x86/pci/mmconfig-shared.c u32 l, extcfg; l 273 arch/x86/pci/mmconfig-shared.c raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l); l 274 arch/x86/pci/mmconfig-shared.c vendor = l & 0xffff; l 275 arch/x86/pci/mmconfig-shared.c device = (l >> 16) & 0xffff; l 345 arch/x86/pci/mmconfig-shared.c u32 l; l 359 arch/x86/pci/mmconfig-shared.c raw_pci_ops->read(0, bus, devfn, 0, 4, &l); l 360 arch/x86/pci/mmconfig-shared.c vendor = l & 0xffff; l 361 arch/x86/pci/mmconfig-shared.c device = (l >> 16) & 0xffff; l 180 arch/x86/xen/platform-pci-unplug.c int l; l 185 arch/x86/xen/platform-pci-unplug.c l = q - p; l 188 arch/x86/xen/platform-pci-unplug.c l = strlen(p); l 190 arch/x86/xen/platform-pci-unplug.c if (!strncmp(p, "all", l)) l 192 arch/x86/xen/platform-pci-unplug.c else if (!strncmp(p, "ide-disks", l)) l 194 arch/x86/xen/platform-pci-unplug.c else if (!strncmp(p, "aux-ide-disks", l)) l 196 arch/x86/xen/platform-pci-unplug.c else if (!strncmp(p, "nics", l)) l 198 arch/x86/xen/platform-pci-unplug.c else if (!strncmp(p, "unnecessary", l)) l 200 arch/x86/xen/platform-pci-unplug.c else if (!strncmp(p, "never", l)) l 405 arch/x86/xen/pmu.c 
xenpmu_data->pmu.l.lapic_lvtpc = val; l 85 arch/xtensa/include/asm/processor.h #define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l) l 86 arch/xtensa/include/asm/processor.h #define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK) l 110 block/partitions/mac.c int i, l; l 113 block/partitions/mac.c l = strlen(part->name); l 116 block/partitions/mac.c for (i = 0; i <= l - 4; ++i) { l 278 block/partitions/msdos.c struct bsd_disklabel *l; l 282 block/partitions/msdos.c l = read_part_sector(state, offset + 1, &sect); l 283 block/partitions/msdos.c if (!l) l 285 block/partitions/msdos.c if (le32_to_cpu(l->d_magic) != BSD_DISKMAGIC) { l 293 block/partitions/msdos.c if (le16_to_cpu(l->d_npartitions) < max_partitions) l 294 block/partitions/msdos.c max_partitions = le16_to_cpu(l->d_npartitions); l 295 block/partitions/msdos.c for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) { l 306 block/partitions/msdos.c le32_to_cpu(l->d_partitions[2].p_offset) == 0) l 318 block/partitions/msdos.c if (le16_to_cpu(l->d_npartitions) > max_partitions) { l 320 block/partitions/msdos.c le16_to_cpu(l->d_npartitions) - max_partitions); l 361 block/partitions/msdos.c struct unixware_disklabel *l; l 364 block/partitions/msdos.c l = read_part_sector(state, offset + 29, &sect); l 365 block/partitions/msdos.c if (!l) l 367 block/partitions/msdos.c if (le32_to_cpu(l->d_magic) != UNIXWARE_DISKMAGIC || l 368 block/partitions/msdos.c le32_to_cpu(l->vtoc.v_magic) != UNIXWARE_DISKMAGIC2) { l 378 block/partitions/msdos.c p = &l->vtoc.v_slice[1]; l 380 block/partitions/msdos.c while (p - &l->vtoc.v_slice[0] < UNIXWARE_NUMSLICE) { l 307 crypto/cast5_generic.c u32 l, r, t; l 318 crypto/cast5_generic.c l = be32_to_cpu(src[0]); l 329 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); l 330 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); l 331 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); l 332 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); l 333 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); l 334 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); l 335 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); l 336 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); l 337 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); l 338 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); l 339 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); l 340 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); l 342 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); l 343 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); l 344 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); l 345 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); l 351 crypto/cast5_generic.c dst[1] = cpu_to_be32(l); l 364 crypto/cast5_generic.c u32 l, r, t; l 372 crypto/cast5_generic.c l = be32_to_cpu(src[0]); l 376 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); l 377 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); l 378 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); l 379 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); l 381 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); l 382 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); l 383 crypto/cast5_generic.c t =
l; l = r; r = t ^ F1(r, Km[9], Kr[9]); l 384 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); l 385 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); l 386 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); l 387 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); l 388 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); l 389 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); l 390 crypto/cast5_generic.c t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); l 391 crypto/cast5_generic.c t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); l 392 crypto/cast5_generic.c t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); l 395 crypto/cast5_generic.c dst[1] = cpu_to_be32(l); l 140 crypto/ccm.c unsigned int l = lp + 1; l 154 crypto/ccm.c return set_msg_len(info + 16 - l, cryptlen, l); l 863 crypto/ccm.c unsigned int l = min(len, bs - ctx->len); l 865 crypto/ccm.c crypto_xor(dg + ctx->len, p, l); l 866 crypto/ccm.c ctx->len +=l; l 867 crypto/ccm.c len -= l; l 868 crypto/ccm.c p += l; l 228 crypto/fcrypt.c union lc4 { __be32 l; u8 c[4]; } u; \ l 229 crypto/fcrypt.c u.l = sched ^ R; \ l 240 crypto/fcrypt.c __be32 l, r; l 245 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); l 246 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); l 247 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); l 248 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); l 249 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); l 250 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); l 251 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); l 252 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x7]); l 253 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x8]); l 254 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x9]); l 255 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xa]); l 256 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xb]); l 257 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xc]); l 258 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xd]); l 259 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xe]); l 260 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xf]); l 272 crypto/fcrypt.c __be32 l, r; l 277 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xf]); l 278 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xe]); l 279 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xd]); l 280 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xc]); l 281 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xb]); l 282 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xa]); l 283 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x9]); l 284 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x8]); l 285 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x7]); l 286 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); l 287 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); l 288 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); l 289 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); l 290 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); l 291 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); l 292 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); l 18 crypto/michael_mic.c u32 l, r; l 25 crypto/michael_mic.c u32 l, r; l 34 crypto/michael_mic.c #define michael_block(l, r) \ l 36 crypto/michael_mic.c r ^= rol32(l, 17); \ l 37 crypto/michael_mic.c l += r; \ l 38 crypto/michael_mic.c r ^= xswap(l); \ l 39 crypto/michael_mic.c l += r; \ l 40 crypto/michael_mic.c r ^= rol32(l, 3); \ l 41 crypto/michael_mic.c l += r; \ l 42 crypto/michael_mic.c r ^= 
ror32(l, 2); \ l 43 crypto/michael_mic.c l += r; \ l 52 crypto/michael_mic.c mctx->l = ctx->l; l 78 crypto/michael_mic.c mctx->l ^= le32_to_cpup(src); l 79 crypto/michael_mic.c michael_block(mctx->l, mctx->r); l 86 crypto/michael_mic.c mctx->l ^= le32_to_cpup(src++); l 87 crypto/michael_mic.c michael_block(mctx->l, mctx->r); l 109 crypto/michael_mic.c mctx->l ^= 0x5a; l 112 crypto/michael_mic.c mctx->l ^= data[0] | 0x5a00; l 115 crypto/michael_mic.c mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000; l 118 crypto/michael_mic.c mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) | l 122 crypto/michael_mic.c michael_block(mctx->l, mctx->r); l 124 crypto/michael_mic.c michael_block(mctx->l, mctx->r); l 126 crypto/michael_mic.c dst[0] = cpu_to_le32(mctx->l); l 145 crypto/michael_mic.c mctx->l = le32_to_cpu(data[0]); l 38 crypto/serpent_generic.c #define store_and_load_keys(x0, x1, x2, x3, s, l) \ l 39 crypto/serpent_generic.c ({ storekeys(x0, x1, x2, x3, s); loadkeys(x0, x1, x2, x3, l); }) l 535 crypto/twofish_common.c #define CALC_K(a, j, k, l, m, n) \ l 536 crypto/twofish_common.c x = CALC_K_2 (k, l, k, l, 0); \ l 548 crypto/twofish_common.c #define CALC_K192(a, j, k, l, m, n) \ l 549 crypto/twofish_common.c x = CALC_K192_2 (l, l, k, k, 0); \ l 561 crypto/twofish_common.c #define CALC_K256(a, j, k, l, m, n) \ l 562 crypto/twofish_common.c x = CALC_K256_2 (k, l, 0); \ l 1377 drivers/acpi/acpi_video.c int min, max, min_above, max_below, i, l, delta = 255; l 1382 drivers/acpi/acpi_video.c l = device->brightness->levels[i]; l 1383 drivers/acpi/acpi_video.c if (abs(l - level_current) < abs(delta)) { l 1384 drivers/acpi/acpi_video.c delta = l - level_current; l 1392 drivers/acpi/acpi_video.c l = device->brightness->levels[i]; l 1393 drivers/acpi/acpi_video.c if (l < min) l 1394 drivers/acpi/acpi_video.c min = l; l 1395 drivers/acpi/acpi_video.c if (l > max) l 1396 drivers/acpi/acpi_video.c max = l; l 1397 drivers/acpi/acpi_video.c if (l < min_above && l > level_current) l 1398 drivers/acpi/acpi_video.c min_above = l; l 1399 drivers/acpi/acpi_video.c if (l > max_below && l < level_current) l 1400 drivers/acpi/acpi_video.c max_below = l; l 1698 drivers/base/power/domain.c struct gpd_link *l, *link; l 1714 drivers/base/power/domain.c list_for_each_entry_safe(link, l, &genpd->master_links, master_node) { l 1848 drivers/base/power/domain.c struct gpd_link *l, *link; l 1867 drivers/base/power/domain.c list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) { l 1640 drivers/block/drbd/drbd_main.c unsigned l = min_t(unsigned, len, PAGE_SIZE); l 1642 drivers/block/drbd/drbd_main.c err = _drbd_send_page(peer_device, page, 0, l, l 1646 drivers/block/drbd/drbd_main.c len -= l; l 1091 drivers/block/drbd/drbd_worker.c unsigned int l = min_t(unsigned int, len, PAGE_SIZE); l 1092 drivers/block/drbd/drbd_worker.c unsigned int i, words = l / sizeof(long); l 1103 drivers/block/drbd/drbd_worker.c len -= l; l 283 drivers/block/floppy.c static inline void fallback_on_nodma_alloc(char **addr, size_t l) l 291 drivers/block/floppy.c *addr = (char *)nodma_mem_alloc(l); l 664 drivers/block/loop.c struct loop_device *l; l 669 drivers/block/loop.c l = f->f_mapping->host->i_bdev->bd_disk->private_data; l 670 drivers/block/loop.c if (l->lo_state != Lo_bound) { l 673 drivers/block/loop.c f = l->lo_backing_file; l 2001 drivers/block/loop.c static int loop_add(struct loop_device **l, int i) l 2092 drivers/block/loop.c *l = lo; l 2119 drivers/block/loop.c struct loop_device **l = data; l 2122 drivers/block/loop.c *l = lo; 
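
The crypto/michael_mic.c entries above spell out the complete michael_block() round on the running pair l/r, but not the xswap() helper it calls. Below is a stand-alone sketch of that round, written as a function rather than the kernel's macro; rol32/ror32 mirror the usual kernel helpers, and the xswap() definition is the conventional Michael byte-pair swap, assumed rather than quoted from the listing.

/*
 * Stand-alone sketch of the michael_block() round quoted from
 * crypto/michael_mic.c above.  xswap() is not shown in the listing;
 * the definition below (swap adjacent bytes in each 16-bit half) is
 * the usual Michael one and is an assumption here.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t v, unsigned int s)
{
	return (v << s) | (v >> (32 - s));
}

static inline uint32_t ror32(uint32_t v, unsigned int s)
{
	return (v >> s) | (v << (32 - s));
}

static inline uint32_t xswap(uint32_t v)	/* assumed helper, not quoted above */
{
	return ((v & 0x00ff00ffu) << 8) | ((v & 0xff00ff00u) >> 8);
}

/* One Michael block, following the quoted macro step for step. */
static void michael_block(uint32_t *l, uint32_t *r)
{
	*r ^= rol32(*l, 17); *l += *r;
	*r ^= xswap(*l);     *l += *r;
	*r ^= rol32(*l, 3);  *l += *r;
	*r ^= ror32(*l, 2);  *l += *r;
}

int main(void)
{
	/* l and r play the role of mctx->l / mctx->r in the listing. */
	uint32_t l = 0x12345678, r = 0x9abcdef0;

	l ^= 0x5a;		/* 0x5a padding, as in the michael_mic.c:109 entry */
	michael_block(&l, &r);
	michael_block(&l, &r);	/* the quoted finalisation runs the block twice */

	printf("l=%08x r=%08x\n", l, r);
	return 0;
}
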
l 2128 drivers/block/loop.c static int loop_lookup(struct loop_device **l, int i) l 2138 drivers/block/loop.c *l = lo; l 2147 drivers/block/loop.c *l = lo; l 39 drivers/block/paride/bpck.c #define j44(l,h) (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80)) l 50 drivers/block/paride/bpck.c { int r, l, h; l 57 drivers/block/paride/bpck.c l = r1(); l 60 drivers/block/paride/bpck.c return j44(l,h); l 151 drivers/block/paride/bpck.c { int i, l, h; l 158 drivers/block/paride/bpck.c t2(4); l = r1(); l 160 drivers/block/paride/bpck.c buf[i] = j44(l,h); l 280 drivers/block/paride/bpck.c { int i, e, l, h, om; l 291 drivers/block/paride/bpck.c t2(4); l = r1(); l 293 drivers/block/paride/bpck.c buf[i] = j44(l,h); l 47 drivers/block/paride/comm.c { int l, h, r; l 54 drivers/block/paride/comm.c w2(6); l = r1(); w0(0x80); h = r1(); w2(4); l 55 drivers/block/paride/comm.c return j44(l,h); l 110 drivers/block/paride/comm.c { int i, l, h; l 116 drivers/block/paride/comm.c w0(0); w2(6); l = r1(); l 118 drivers/block/paride/comm.c buf[i] = j44(l,h); l 43 drivers/block/paride/friq.c #define j44(l,h) (((l>>4)&0x0f)|(h&0xf0)) l 53 drivers/block/paride/friq.c { int h,l,r; l 58 drivers/block/paride/friq.c w2(6); l = r1(); l 62 drivers/block/paride/friq.c return j44(l,h); l 79 drivers/block/paride/friq.c { int h, l, k, ph; l 85 drivers/block/paride/friq.c w2(6); l = r1(); l 87 drivers/block/paride/friq.c buf[k] = j44(l,h); l 39 drivers/block/paride/frpw.c #define j44(l,h) (((l>>4)&0x0f)|(h&0xf0)) l 49 drivers/block/paride/frpw.c { int h,l,r; l 55 drivers/block/paride/frpw.c w2(6); l = r1(); l 59 drivers/block/paride/frpw.c return j44(l,h); l 76 drivers/block/paride/frpw.c { int h, l, k, ph; l 82 drivers/block/paride/frpw.c w2(6); l = r1(); l 84 drivers/block/paride/frpw.c buf[k] = j44(l,h); l 38 drivers/block/paride/on20.c { int h,l, r ; l 46 drivers/block/paride/on20.c case 0: w2(4); w2(6); l = r1(); l 49 drivers/block/paride/on20.c return j44(l,h); l 89 drivers/block/paride/on20.c { int k, l, h; l 97 drivers/block/paride/on20.c w2(6); l = r1(); w2(4); l 99 drivers/block/paride/on20.c buf[k] = j44(l,h); l 605 drivers/block/paride/pf.c int j, k, l; l 608 drivers/block/paride/pf.c l = 0; l 610 drivers/block/paride/pf.c if ((buf[k + offs] != 0x20) || (buf[k + offs] != l)) l 611 drivers/block/paride/pf.c l = targ[j++] = buf[k + offs]; l 612 drivers/block/paride/pf.c if (l == 0x20) l 424 drivers/block/paride/pg.c char l = '\0'; l 429 drivers/block/paride/pg.c if (c != ' ' && c != l) l 430 drivers/block/paride/pg.c l = *targ++ = c; l 432 drivers/block/paride/pg.c if (l == ' ') l 506 drivers/block/paride/pt.c int j, k, l; l 509 drivers/block/paride/pt.c l = 0; l 511 drivers/block/paride/pt.c if ((buf[k + offs] != 0x20) || (buf[k + offs] != l)) l 512 drivers/block/paride/pt.c l = targ[j++] = buf[k + offs]; l 513 drivers/block/paride/pt.c if (l == 0x20) l 2629 drivers/block/rbd.c static bool rbd_layout_is_fancy(struct ceph_file_layout *l) l 2631 drivers/block/rbd.c return l->stripe_unit != l->object_size; l 1826 drivers/bus/ti-sysc.c struct clk_lookup *l; l 1843 drivers/bus/ti-sysc.c l = clkdev_create(clk, name, dev_name(child)); l 1844 drivers/bus/ti-sysc.c if (!l) l 75 drivers/char/agp/isoch.c u32 l; l 127 drivers/char/agp/isoch.c target.l = (tnistat >> 3) & 0x7; l 176 drivers/char/dsp56k.c dsp56k_host_interface.data.l = 3; /* Magic execute */ l 231 drivers/char/dsp56k.c put_user(dsp56k_host_interface.data.l, data+n++)); l 293 drivers/char/dsp56k.c get_user(dsp56k_host_interface.data.l, data+n++)); l 179 
drivers/char/ipmi/ipmi_watchdog.c int l; l 184 drivers/char/ipmi/ipmi_watchdog.c l = simple_strtoul(val, &endp, 0); l 188 drivers/char/ipmi/ipmi_watchdog.c *((int *)kp->arg) = l; l 1504 drivers/char/random.c unsigned long l[LONGS(20)]; l 1518 drivers/char/random.c hash.l[i] = v; l 778 drivers/char/rtc.c unsigned long l; l 786 drivers/char/rtc.c l = rtc_irq_data; l 789 drivers/char/rtc.c if (l != 0) l 307 drivers/char/tpm/tpm_i2c_infineon.c #define TPM_ACCESS(l) (0x0000 | ((l) << 4)) l 308 drivers/char/tpm/tpm_i2c_infineon.c #define TPM_STS(l) (0x0001 | ((l) << 4)) l 309 drivers/char/tpm/tpm_i2c_infineon.c #define TPM_DATA_FIFO(l) (0x0005 | ((l) << 4)) l 310 drivers/char/tpm/tpm_i2c_infineon.c #define TPM_DID_VID(l) (0x0006 | ((l) << 4)) l 98 drivers/char/tpm/tpm_tis_core.c static int wait_startup(struct tpm_chip *chip, int l) l 107 drivers/char/tpm/tpm_tis_core.c rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); l 118 drivers/char/tpm/tpm_tis_core.c static bool check_locality(struct tpm_chip *chip, int l) l 124 drivers/char/tpm/tpm_tis_core.c rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); l 130 drivers/char/tpm/tpm_tis_core.c priv->locality = l; l 137 drivers/char/tpm/tpm_tis_core.c static bool locality_inactive(struct tpm_chip *chip, int l) l 143 drivers/char/tpm/tpm_tis_core.c rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access); l 154 drivers/char/tpm/tpm_tis_core.c static int release_locality(struct tpm_chip *chip, int l) l 160 drivers/char/tpm/tpm_tis_core.c tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); l 171 drivers/char/tpm/tpm_tis_core.c (locality_inactive(chip, l)), l 183 drivers/char/tpm/tpm_tis_core.c if (locality_inactive(chip, l)) l 191 drivers/char/tpm/tpm_tis_core.c static int request_locality(struct tpm_chip *chip, int l) l 197 drivers/char/tpm/tpm_tis_core.c if (check_locality(chip, l)) l 198 drivers/char/tpm/tpm_tis_core.c return l; l 200 drivers/char/tpm/tpm_tis_core.c rc = tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_REQUEST_USE); l 213 drivers/char/tpm/tpm_tis_core.c (chip, l)), l 216 drivers/char/tpm/tpm_tis_core.c return l; l 224 drivers/char/tpm/tpm_tis_core.c if (check_locality(chip, l)) l 225 drivers/char/tpm/tpm_tis_core.c return l; l 66 drivers/char/tpm/tpm_tis_core.h #define TPM_ACCESS(l) (0x0000 | ((l) << 12)) l 67 drivers/char/tpm/tpm_tis_core.h #define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12)) l 68 drivers/char/tpm/tpm_tis_core.h #define TPM_INT_VECTOR(l) (0x000C | ((l) << 12)) l 69 drivers/char/tpm/tpm_tis_core.h #define TPM_INT_STATUS(l) (0x0010 | ((l) << 12)) l 70 drivers/char/tpm/tpm_tis_core.h #define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12)) l 71 drivers/char/tpm/tpm_tis_core.h #define TPM_STS(l) (0x0018 | ((l) << 12)) l 72 drivers/char/tpm/tpm_tis_core.h #define TPM_STS3(l) (0x001b | ((l) << 12)) l 73 drivers/char/tpm/tpm_tis_core.h #define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12)) l 75 drivers/char/tpm/tpm_tis_core.h #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) l 76 drivers/char/tpm/tpm_tis_core.h #define TPM_RID(l) (0x0F04 | ((l) << 12)) l 271 drivers/clk/clkdev.c struct clk_lookup *l; l 276 drivers/clk/clkdev.c l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL, l 280 drivers/clk/clkdev.c return l ? 
0 : -ENODEV; l 384 drivers/clk/clkdev.c struct clk_lookup **l = res; l 386 drivers/clk/clkdev.c return *l == data; l 47 drivers/clk/meson/axg.c .l = { l 106 drivers/clk/meson/axg.c .l = { l 208 drivers/clk/meson/axg.c .l = { l 280 drivers/clk/meson/axg.c .l = { l 733 drivers/clk/meson/axg.c .l = { l 280 drivers/clk/meson/clk-pll.c if (meson_parm_read(clk->map, &pll->l)) l 309 drivers/clk/meson/clk-pll.c !meson_parm_read(clk->map, &pll->l)) l 37 drivers/clk/meson/clk-pll.h struct parm l; l 51 drivers/clk/meson/g12a.c .l = { l 115 drivers/clk/meson/g12a.c .l = { l 174 drivers/clk/meson/g12a.c .l = { l 1632 drivers/clk/meson/g12a.c .l = { l 1697 drivers/clk/meson/g12a.c .l = { l 1772 drivers/clk/meson/g12a.c .l = { l 1864 drivers/clk/meson/g12a.c .l = { l 1958 drivers/clk/meson/g12a.c .l = { l 107 drivers/clk/meson/gxbb.c .l = { l 184 drivers/clk/meson/gxbb.c .l = { l 238 drivers/clk/meson/gxbb.c .l = { l 389 drivers/clk/meson/gxbb.c .l = { l 451 drivers/clk/meson/gxbb.c .l = { l 505 drivers/clk/meson/gxbb.c .l = { l 86 drivers/clk/meson/meson8b.c .l = { l 150 drivers/clk/meson/meson8b.c .l = { l 225 drivers/clk/meson/meson8b.c .l = { l 1935 drivers/clk/meson/meson8b.c .l = { l 240 drivers/clk/pxa/clk-pxa25x.c unsigned int l, m, n2, t; l 244 drivers/clk/pxa/clk-pxa25x.c l = L_clk_mult[(cccr >> 0) & 0x1f]; l 248 drivers/clk/pxa/clk-pxa25x.c return m * l * n2 * parent_rate / 2; l 228 drivers/clk/pxa/clk-pxa27x.c unsigned int l, L, n2, N; l 235 drivers/clk/pxa/clk-pxa27x.c l = ccsr & CCSR_L_MASK; l 237 drivers/clk/pxa/clk-pxa27x.c L = l * parent_rate; l 273 drivers/clk/pxa/clk-pxa27x.c unsigned int l, osc_forced; l 277 drivers/clk/pxa/clk-pxa27x.c l = ccsr & CCSR_L_MASK; l 286 drivers/clk/pxa/clk-pxa27x.c if (l <= 7) l 288 drivers/clk/pxa/clk-pxa27x.c if (l <= 16) l 417 drivers/clk/pxa/clk-pxa27x.c unsigned int a, l, osc_forced; l 423 drivers/clk/pxa/clk-pxa27x.c l = ccsr & CCSR_L_MASK; l 427 drivers/clk/pxa/clk-pxa27x.c if (l <= 10) l 429 drivers/clk/pxa/clk-pxa27x.c if (l <= 20) l 1706 drivers/clk/qcom/camcc-sdm845.c cam_cc_pll_config.l = 0x1f; l 1710 drivers/clk/qcom/camcc-sdm845.c cam_cc_pll_config.l = 0x2a; l 1714 drivers/clk/qcom/camcc-sdm845.c cam_cc_pll_config.l = 0x32; l 1718 drivers/clk/qcom/camcc-sdm845.c cam_cc_pll_config.l = 0x14; l 213 drivers/clk/qcom/clk-alpha-pll.c regmap_write(regmap, PLL_L_VAL(pll), config->l); l 407 drivers/clk/qcom/clk-alpha-pll.c alpha_pll_calc_rate(u64 prate, u32 l, u32 a, u32 alpha_width) l 409 drivers/clk/qcom/clk-alpha-pll.c return (prate * l) + ((prate * a) >> ALPHA_SHIFT(alpha_width)); l 413 drivers/clk/qcom/clk-alpha-pll.c alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a, l 421 drivers/clk/qcom/clk-alpha-pll.c *l = quotient; l 437 drivers/clk/qcom/clk-alpha-pll.c return alpha_pll_calc_rate(prate, *l, *a, alpha_width); l 456 drivers/clk/qcom/clk-alpha-pll.c u32 l, low, high, ctl; l 461 drivers/clk/qcom/clk-alpha-pll.c regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); l 478 drivers/clk/qcom/clk-alpha-pll.c return alpha_pll_calc_rate(prate, l, a, alpha_width); l 541 drivers/clk/qcom/clk-alpha-pll.c u32 l, alpha_width = pll_alpha_width(pll); l 544 drivers/clk/qcom/clk-alpha-pll.c rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width); l 551 drivers/clk/qcom/clk-alpha-pll.c regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); l 591 drivers/clk/qcom/clk-alpha-pll.c u32 l, alpha_width = pll_alpha_width(pll); l 595 drivers/clk/qcom/clk-alpha-pll.c rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width); l 606 
drivers/clk/qcom/clk-alpha-pll.c alpha_huayra_pll_calc_rate(u64 prate, u32 l, u32 a) l 613 drivers/clk/qcom/clk-alpha-pll.c l -= 1; l 615 drivers/clk/qcom/clk-alpha-pll.c return (prate * l) + (prate * a >> PLL_HUAYRA_ALPHA_WIDTH); l 620 drivers/clk/qcom/clk-alpha-pll.c u32 *l, u32 *a) l 627 drivers/clk/qcom/clk-alpha-pll.c *l = quotient; l 646 drivers/clk/qcom/clk-alpha-pll.c *l += 1; l 649 drivers/clk/qcom/clk-alpha-pll.c return alpha_huayra_pll_calc_rate(prate, *l, *a); l 657 drivers/clk/qcom/clk-alpha-pll.c u32 l, alpha = 0, ctl, alpha_m, alpha_n; l 659 drivers/clk/qcom/clk-alpha-pll.c regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); l 682 drivers/clk/qcom/clk-alpha-pll.c return alpha_huayra_pll_calc_rate(rate, l, alpha); l 687 drivers/clk/qcom/clk-alpha-pll.c rate *= l; l 703 drivers/clk/qcom/clk-alpha-pll.c return alpha_huayra_pll_calc_rate(rate, l, alpha); l 710 drivers/clk/qcom/clk-alpha-pll.c u32 l, a, ctl, cur_alpha = 0; l 712 drivers/clk/qcom/clk-alpha-pll.c rate = alpha_huayra_pll_round_rate(rate, prate, &l, &a); l 730 drivers/clk/qcom/clk-alpha-pll.c regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); l 736 drivers/clk/qcom/clk-alpha-pll.c regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); l 752 drivers/clk/qcom/clk-alpha-pll.c u32 l, a; l 754 drivers/clk/qcom/clk-alpha-pll.c return alpha_huayra_pll_round_rate(rate, *prate, &l, &a); l 853 drivers/clk/qcom/clk-alpha-pll.c u32 l, frac; l 856 drivers/clk/qcom/clk-alpha-pll.c regmap_read(regmap, PLL_L_VAL(pll), &l); l 859 drivers/clk/qcom/clk-alpha-pll.c return alpha_pll_calc_rate(prate, l, frac, ALPHA_REG_16BIT_WIDTH); l 867 drivers/clk/qcom/clk-alpha-pll.c u32 l; l 871 drivers/clk/qcom/clk-alpha-pll.c &l, &a, ALPHA_REG_16BIT_WIDTH); l 1017 drivers/clk/qcom/clk-alpha-pll.c if (config->l) l 1018 drivers/clk/qcom/clk-alpha-pll.c regmap_write(regmap, PLL_L_VAL(pll), config->l); l 1132 drivers/clk/qcom/clk-alpha-pll.c u32 l, frac, alpha_width = pll_alpha_width(pll); l 1134 drivers/clk/qcom/clk-alpha-pll.c regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); l 1137 drivers/clk/qcom/clk-alpha-pll.c return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); l 1144 drivers/clk/qcom/clk-alpha-pll.c u32 val, l, alpha_width = pll_alpha_width(pll); l 1153 drivers/clk/qcom/clk-alpha-pll.c rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width); l 1164 drivers/clk/qcom/clk-alpha-pll.c regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); l 92 drivers/clk/qcom/clk-alpha-pll.h u32 l; l 83 drivers/clk/qcom/clk-pll.c u32 l, m, n, config; l 87 drivers/clk/qcom/clk-pll.c regmap_read(pll->clkr.regmap, pll->l_reg, &l); l 91 drivers/clk/qcom/clk-pll.c l &= 0x3ff; l 95 drivers/clk/qcom/clk-pll.c rate = parent_rate * l; l 159 drivers/clk/qcom/clk-pll.c regmap_update_bits(pll->clkr.regmap, pll->l_reg, 0x3ff, f->l); l 224 drivers/clk/qcom/clk-pll.c regmap_write(regmap, pll->l_reg, config->l); l 319 drivers/clk/qcom/clk-pll.c regmap_update_bits(pll->clkr.regmap, pll->l_reg, 0x3ff, f->l); l 21 drivers/clk/qcom/clk-pll.h u16 l; l 62 drivers/clk/qcom/clk-pll.h u16 l; l 952 drivers/clk/qcom/clk-rcg2.c static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, l 961 drivers/clk/qcom/clk-rcg2.c regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg); l 984 drivers/clk/qcom/clk-rcg2.c regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l), l 989 drivers/clk/qcom/clk-rcg2.c regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l), l 653 drivers/clk/qcom/dispcc-sdm845.c disp_cc_pll0_config.l = 0x2c; l 217 
drivers/clk/qcom/gcc-ipq806x.c .l = _l, \ l 355 drivers/clk/qcom/gcc-qcs404.c .l = 48, l 55 drivers/clk/qcom/gpucc-sdm845.c .l = 0x1a, l 43 drivers/clk/qcom/lcc-ipq806x.c .l = 0xf, l 3029 drivers/clk/qcom/mmcc-apq8084.c .l = 60, l 3043 drivers/clk/qcom/mmcc-apq8084.c .l = 48, l 141 drivers/clk/qcom/mmcc-msm8960.c .l = 33, l 2308 drivers/clk/qcom/mmcc-msm8974.c .l = 60, l 2322 drivers/clk/qcom/mmcc-msm8974.c .l = 48, l 46 drivers/clk/qcom/videocc-sdm845.c .l = 0x10, l 61 drivers/clocksource/arc_timer.c u32 l, h; l 80 drivers/clocksource/arc_timer.c l = read_aux_reg(ARC_REG_MCIP_READBACK); l 87 drivers/clocksource/arc_timer.c return (((u64)h) << 32) | l; l 131 drivers/clocksource/arc_timer.c u32 l, h; l 140 drivers/clocksource/arc_timer.c l = read_aux_reg(AUX_RTC_LOW); l 145 drivers/clocksource/arc_timer.c return (((u64)h) << 32) | l; l 114 drivers/clocksource/timer-ti-dm.c u32 l, timeout = 100000; l 122 drivers/clocksource/timer-ti-dm.c l = __omap_dm_timer_read(timer, l 124 drivers/clocksource/timer-ti-dm.c } while (!l && timeout--); l 132 drivers/clocksource/timer-ti-dm.c l = __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0); l 133 drivers/clocksource/timer-ti-dm.c l |= 0x2 << 0x3; l 134 drivers/clocksource/timer-ti-dm.c __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l, 0); l 475 drivers/clocksource/timer-ti-dm.c u32 l; l 477 drivers/clocksource/timer-ti-dm.c l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); l 478 drivers/clocksource/timer-ti-dm.c if (l & OMAP_TIMER_CTRL_ST) { l 522 drivers/clocksource/timer-ti-dm.c u32 l; l 529 drivers/clocksource/timer-ti-dm.c l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); l 530 drivers/clocksource/timer-ti-dm.c if (!(l & OMAP_TIMER_CTRL_ST)) { l 531 drivers/clocksource/timer-ti-dm.c l |= OMAP_TIMER_CTRL_ST; l 532 drivers/clocksource/timer-ti-dm.c omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); l 536 drivers/clocksource/timer-ti-dm.c timer->context.tclr = l; l 566 drivers/clocksource/timer-ti-dm.c u32 l; l 572 drivers/clocksource/timer-ti-dm.c l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); l 574 drivers/clocksource/timer-ti-dm.c l |= OMAP_TIMER_CTRL_AR; l 576 drivers/clocksource/timer-ti-dm.c l &= ~OMAP_TIMER_CTRL_AR; l 577 drivers/clocksource/timer-ti-dm.c omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); l 582 drivers/clocksource/timer-ti-dm.c timer->context.tclr = l; l 591 drivers/clocksource/timer-ti-dm.c u32 l; l 597 drivers/clocksource/timer-ti-dm.c l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); l 599 drivers/clocksource/timer-ti-dm.c l |= OMAP_TIMER_CTRL_CE; l 601 drivers/clocksource/timer-ti-dm.c l &= ~OMAP_TIMER_CTRL_CE; l 603 drivers/clocksource/timer-ti-dm.c omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); l 606 drivers/clocksource/timer-ti-dm.c timer->context.tclr = l; l 615 drivers/clocksource/timer-ti-dm.c u32 l; l 621 drivers/clocksource/timer-ti-dm.c l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG); l 622 drivers/clocksource/timer-ti-dm.c l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM | l 625 drivers/clocksource/timer-ti-dm.c l |= OMAP_TIMER_CTRL_SCPWM; l 627 drivers/clocksource/timer-ti-dm.c l |= OMAP_TIMER_CTRL_PT; l 628 drivers/clocksource/timer-ti-dm.c l |= trigger << 10; l 629 drivers/clocksource/timer-ti-dm.c omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); l 632 drivers/clocksource/timer-ti-dm.c timer->context.tclr = l; l 640 drivers/clocksource/timer-ti-dm.c u32 l; l 646 drivers/clocksource/timer-ti-dm.c l = omap_dm_timer_read_reg(timer, 
OMAP_TIMER_CTRL_REG); l 647 drivers/clocksource/timer-ti-dm.c l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2)); l 649 drivers/clocksource/timer-ti-dm.c l |= OMAP_TIMER_CTRL_PRE; l 650 drivers/clocksource/timer-ti-dm.c l |= prescaler << 2; l 652 drivers/clocksource/timer-ti-dm.c omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l); l 655 drivers/clocksource/timer-ti-dm.c timer->context.tclr = l; l 685 drivers/clocksource/timer-ti-dm.c u32 l = mask; l 693 drivers/clocksource/timer-ti-dm.c l = readl_relaxed(timer->irq_ena) & ~mask; l 695 drivers/clocksource/timer-ti-dm.c writel_relaxed(l, timer->irq_dis); l 696 drivers/clocksource/timer-ti-dm.c l = omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask; l 697 drivers/clocksource/timer-ti-dm.c omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, l); l 708 drivers/clocksource/timer-ti-dm.c unsigned int l; l 715 drivers/clocksource/timer-ti-dm.c l = readl_relaxed(timer->irq_stat); l 717 drivers/clocksource/timer-ti-dm.c return l; l 316 drivers/counter/counter.c struct list_head l; l 355 drivers/counter/counter.c list_add(&counter_attr->l, attr_list); l 469 drivers/counter/counter.c list_for_each_entry_safe(p, n, attr_list, l) { l 473 drivers/counter/counter.c list_del(&p->l); l 1355 drivers/counter/counter.c list_for_each_entry(p, &group->attr_list, l) l 54 drivers/cpufreq/amd_freq_sensitivity.c &actual.l, &actual.h); l 56 drivers/cpufreq/amd_freq_sensitivity.c &reference.l, &reference.h); l 54 drivers/cpufreq/p4-clockmod.c u32 l, h; l 59 drivers/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); l 61 drivers/cpufreq/p4-clockmod.c if (l & 0x01) l 68 drivers/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); l 71 drivers/cpufreq/p4-clockmod.c wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); l 80 drivers/cpufreq/p4-clockmod.c l = (l & ~14); l 81 drivers/cpufreq/p4-clockmod.c l = l | (1<<4) | ((newstate & 0x7)<<1); l 82 drivers/cpufreq/p4-clockmod.c wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h); l 208 drivers/cpufreq/p4-clockmod.c u32 l, h; l 210 drivers/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); l 212 drivers/cpufreq/p4-clockmod.c if (l & 0x10) { l 213 drivers/cpufreq/p4-clockmod.c l = l >> 1; l 214 drivers/cpufreq/p4-clockmod.c l &= 0x7; l 216 drivers/cpufreq/p4-clockmod.c l = DC_DISABLE; l 218 drivers/cpufreq/p4-clockmod.c if (l != DC_DISABLE) l 219 drivers/cpufreq/p4-clockmod.c return stock_freq * l / 8; l 325 drivers/cpufreq/speedstep-centrino.c unsigned l, h; l 328 drivers/cpufreq/speedstep-centrino.c rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h); l 329 drivers/cpufreq/speedstep-centrino.c clock_freq = extract_clock(l, cpu, 0); l 338 drivers/cpufreq/speedstep-centrino.c rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h); l 339 drivers/cpufreq/speedstep-centrino.c clock_freq = extract_clock(l, cpu, 1); l 348 drivers/cpufreq/speedstep-centrino.c unsigned l, h; l 381 drivers/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 383 drivers/cpufreq/speedstep-centrino.c if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { l 384 drivers/cpufreq/speedstep-centrino.c l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP; l 385 drivers/cpufreq/speedstep-centrino.c pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l); l 386 drivers/cpufreq/speedstep-centrino.c wrmsr(MSR_IA32_MISC_ENABLE, l, h); l 389 drivers/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 390 drivers/cpufreq/speedstep-centrino.c if (!(l & 
MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) { l 739 drivers/cpuidle/cpuidle.c unsigned long l, void *v) l 1552 drivers/crypto/ccree/cc_aead.c unsigned int l = lp + 1; /* This is L' of RFC 3610. */ l 1568 drivers/crypto/ccree/cc_aead.c if (l < 2 || l > 8) { l 1581 drivers/crypto/ccree/cc_aead.c rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */ l 2746 drivers/crypto/chelsio/chcr_algo.c unsigned int l, lp, m; l 2757 drivers/crypto/chelsio/chcr_algo.c l = lp + 1; l 2765 drivers/crypto/chelsio/chcr_algo.c rc = set_msg_len(b0 + 16 - l, l 2767 drivers/crypto/chelsio/chcr_algo.c req->cryptlen - m : req->cryptlen, l); l 369 drivers/crypto/hifn_795x.c volatile __le32 l; l 1208 drivers/crypto/hifn_795x.c dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID | l 1212 drivers/crypto/hifn_795x.c dma->cmdr[dma->cmdi].l = __cpu_to_le32( l 1217 drivers/crypto/hifn_795x.c dma->cmdr[dma->cmdi - 1].l |= __cpu_to_le32(HIFN_D_VALID); l 1242 drivers/crypto/hifn_795x.c dma->srcr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | l 1246 drivers/crypto/hifn_795x.c dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID | l 1267 drivers/crypto/hifn_795x.c dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT | l 1275 drivers/crypto/hifn_795x.c dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID | l 1299 drivers/crypto/hifn_795x.c dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID | l 1303 drivers/crypto/hifn_795x.c dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID | l 1721 drivers/crypto/hifn_795x.c if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID)) l 1739 drivers/crypto/hifn_795x.c if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID)) l 1749 drivers/crypto/hifn_795x.c if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID)) l 1759 drivers/crypto/hifn_795x.c if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID)) l 1826 drivers/crypto/hifn_795x.c pr_info("%x.%p ", dma->resr[i].l, dev->sa[i]); l 1922 drivers/crypto/hifn_795x.c (d->l & __cpu_to_le32(HIFN_D_VALID)) ? 
-ENODEV : 0); l 461 drivers/crypto/img-hash.c u32 u, l; l 469 drivers/crypto/img-hash.c l = nbits; l 471 drivers/crypto/img-hash.c img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l); l 96 drivers/crypto/nx/nx-842-powernv.c unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa)); l 101 drivers/crypto/nx/nx-842-powernv.c dde->length = cpu_to_be32(l); l 104 drivers/crypto/nx/nx-842-powernv.c return l; l 136 drivers/crypto/nx/nx-aes-ccm.c unsigned int l, lp, m = authsize; l 142 drivers/crypto/nx/nx-aes-ccm.c l = lp + 1; l 151 drivers/crypto/nx/nx-aes-ccm.c rc = set_msg_len(b0 + 16 - l, cryptlen, l); l 1318 drivers/crypto/sahara.c unsigned int i, j, k, l; l 1333 drivers/crypto/sahara.c for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) { l 1334 drivers/crypto/sahara.c err = crypto_register_ahash(&sha_v4_algs[l]); l 1342 drivers/crypto/sahara.c for (j = 0; j < l; j++) l 1319 drivers/dma/coh901318.c struct coh901318_lli *l = lli; l 1322 drivers/dma/coh901318.c while (l) { l 1325 drivers/dma/coh901318.c i, l, l->control, &l->src_addr, &l->dst_addr, l 1326 drivers/dma/coh901318.c &l->link_addr, l->virt_link_addr); l 1328 drivers/dma/coh901318.c l = l->virt_link_addr; l 115 drivers/dma/coh901318_lli.c struct coh901318_lli *l; l 121 drivers/dma/coh901318_lli.c l = *lli; l 123 drivers/dma/coh901318_lli.c if (l == NULL) l 128 drivers/dma/coh901318_lli.c while (l->link_addr) { l 129 drivers/dma/coh901318_lli.c next = l->virt_link_addr; l 130 drivers/dma/coh901318_lli.c dma_pool_free(pool->dmapool, l, l->phy_this); l 132 drivers/dma/coh901318_lli.c l = next; l 134 drivers/dma/coh901318_lli.c dma_pool_free(pool->dmapool, l, l->phy_this); l 164 drivers/dma/ppc4xx/adma.c if (i && !cb->ops[i].h && !cb->ops[i].l) l 167 drivers/dma/ppc4xx/adma.c i, cb->ops[i].h, cb->ops[i].l); l 548 drivers/dma/ppc4xx/adma.c xor_hw_desc->ops[src_idx].l = addrl; l 1863 drivers/dma/ppc4xx/adma.c xcb->ops[xor_arg_no].l = addr; l 81 drivers/dma/ppc4xx/xor.h u32 l; l 3051 drivers/edac/amd64_edac.c nbe = reg->l & MSR_MCGCTL_NBE; l 3086 drivers/edac/amd64_edac.c if (reg->l & MSR_MCGCTL_NBE) l 3089 drivers/edac/amd64_edac.c reg->l |= MSR_MCGCTL_NBE; l 3095 drivers/edac/amd64_edac.c reg->l &= ~MSR_MCGCTL_NBE; l 158 drivers/firewire/core-topology.c static inline struct fw_node *fw_node(struct list_head *l) l 160 drivers/firewire/core-topology.c return list_entry(l, struct fw_node, link); l 93 drivers/firmware/dmi-id.c ssize_t l, left; l 112 drivers/firmware/dmi-id.c l = scnprintf(p, left, ":%s%s", f->prefix, t); l 115 drivers/firmware/dmi-id.c p += l; l 116 drivers/firmware/dmi-id.c left -= l; l 291 drivers/firmware/efi/libstub/arm-stub.c static int cmp_mem_desc(const void *l, const void *r) l 293 drivers/firmware/efi/libstub/arm-stub.c const efi_memory_desc_t *left = l, *right = r; l 342 drivers/firmware/efi/libstub/arm-stub.c int l; l 357 drivers/firmware/efi/libstub/arm-stub.c for (l = 0; l < map_size; l += desc_size, prev = in) { l 360 drivers/firmware/efi/libstub/arm-stub.c in = (void *)memory_map + l; l 73 drivers/firmware/efi/libstub/arm32-stub.c unsigned long l; l 129 drivers/firmware/efi/libstub/arm32-stub.c for (l = 0; l < map_size; l += desc_size) { l 133 drivers/firmware/efi/libstub/arm32-stub.c desc = (void *)memory_map + l; l 335 drivers/firmware/efi/libstub/fdt.c int l; l 343 drivers/firmware/efi/libstub/fdt.c for (l = 0; l < map_size; l += desc_size) { l 344 drivers/firmware/efi/libstub/fdt.c efi_memory_desc_t *p = (void *)memory_map + l; l 119 drivers/gpio/gpio-omap.c u32 l = BIT(offset); l 123 drivers/gpio/gpio-omap.c 
bank->context.dataout |= l; l 126 drivers/gpio/gpio-omap.c bank->context.dataout &= ~l; l 129 drivers/gpio/gpio-omap.c writel_relaxed(l, reg); l 182 drivers/gpio/gpio-omap.c u32 l; l 194 drivers/gpio/gpio-omap.c l = BIT(offset); l 199 drivers/gpio/gpio-omap.c val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable); l 335 drivers/gpio/gpio-omap.c u32 l = 0; l 342 drivers/gpio/gpio-omap.c l = readl_relaxed(reg); l 346 drivers/gpio/gpio-omap.c l |= BIT(gpio); l 348 drivers/gpio/gpio-omap.c l &= ~(BIT(gpio)); l 352 drivers/gpio/gpio-omap.c writel_relaxed(l, reg); l 360 drivers/gpio/gpio-omap.c l = readl_relaxed(reg); l 361 drivers/gpio/gpio-omap.c l &= ~(3 << (gpio << 1)); l 363 drivers/gpio/gpio-omap.c l |= 2 << (gpio << 1); l 365 drivers/gpio/gpio-omap.c l |= BIT(gpio << 1); l 366 drivers/gpio/gpio-omap.c writel_relaxed(l, reg); l 493 drivers/gpio/gpio-omap.c u32 l; l 497 drivers/gpio/gpio-omap.c l = readl_relaxed(reg); l 499 drivers/gpio/gpio-omap.c l = ~l; l 500 drivers/gpio/gpio-omap.c l &= mask; l 501 drivers/gpio/gpio-omap.c return l; l 922 drivers/gpio/gpio-omap.c u32 l; l 925 drivers/gpio/gpio-omap.c l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask); l 926 drivers/gpio/gpio-omap.c writel_relaxed(l, reg); l 927 drivers/gpio/gpio-omap.c bank->context.dataout = l; l 951 drivers/gpio/gpio-omap.c u32 l = 0xffffffff; l 954 drivers/gpio/gpio-omap.c l = 0xffff; l 957 drivers/gpio/gpio-omap.c writel_relaxed(l, bank->base + bank->regs->irqenable); l 961 drivers/gpio/gpio-omap.c omap_gpio_rmw(base + bank->regs->irqenable, l, l 963 drivers/gpio/gpio-omap.c omap_gpio_rmw(base + bank->regs->irqstatus, l, l 1145 drivers/gpio/gpio-omap.c u32 l = 0, gen, gen0, gen1; l 1182 drivers/gpio/gpio-omap.c l = readl_relaxed(bank->base + bank->regs->datain); l 1190 drivers/gpio/gpio-omap.c l ^= bank->saved_datain; l 1191 drivers/gpio/gpio-omap.c l &= bank->enabled_non_wakeup_gpios; l 1197 drivers/gpio/gpio-omap.c gen0 = l & bank->context.fallingdetect; l 1200 drivers/gpio/gpio-omap.c gen1 = l & bank->context.risingdetect; l 1204 drivers/gpio/gpio-omap.c gen = l & (~(bank->context.fallingdetect) & l 1223 drivers/gpio/gpio-omap.c writel_relaxed(old0 | l, bank->base + l 1225 drivers/gpio/gpio-omap.c writel_relaxed(old1 | l, bank->base + l 1142 drivers/gpu/drm/amd/amdgpu/amdgpu.h #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) l 1145 drivers/gpu/drm/amd/amdgpu/amdgpu.h #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) l 27 drivers/gpu/drm/amd/amdgpu/amdgpu_display.h #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) l 295 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h #define amdgpu_dpm_force_performance_level(adev, l) \ l 296 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l))) l 1060 drivers/gpu/drm/amd/display/dc/core/dc.c int i, k, l; l 1127 drivers/gpu/drm/amd/display/dc/core/dc.c for (l = 0 ; pipe && l < context->stream_count; l++) { l 1128 drivers/gpu/drm/amd/display/dc/core/dc.c if (context->streams[l] && l 1129 drivers/gpu/drm/amd/display/dc/core/dc.c context->streams[l] == pipe->stream && l 318 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l, l 354 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c l = (ax + wx - 1) / wx; l 359 
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22; l 341 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l, l 377 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c l = (ax + wx - 1) / wx; l 382 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22; l 513 drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, S, ix, wx, p, l0, a, ax, l, l 549 drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c l = (ax + wx - 1) / wx; l 554 drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c Delay = l * wx * (numSlices - 1) + ax + S + lstall + 22; l 77 drivers/gpu/drm/bridge/cdns-dsi.c #define DATA_LANE_ULPM_REQ(l) BIT(9 + (l)) l 126 drivers/gpu/drm/bridge/cdns-dsi.c #define DATA_LANE_RDY(l) BIT(2 + (l)) l 134 drivers/gpu/drm/bridge/cdns-dsi.c #define ERR_CONT_LP(x, l) BIT(18 + ((x) * 4) + (l)) l 135 drivers/gpu/drm/bridge/cdns-dsi.c #define ERR_CONTROL(l) BIT(14 + (l)) l 136 drivers/gpu/drm/bridge/cdns-dsi.c #define ERR_SYNESC(l) BIT(10 + (l)) l 137 drivers/gpu/drm/bridge/cdns-dsi.c #define ERR_ESC(l) BIT(6 + (l)) l 140 drivers/gpu/drm/bridge/cdns-dsi.c #define ERR_CONT_LP_EDGE(x, l) BIT(12 + ((x) * 4) + (l)) l 141 drivers/gpu/drm/bridge/cdns-dsi.c #define ERR_CONTROL_EDGE(l) BIT(8 + (l)) l 142 drivers/gpu/drm/bridge/cdns-dsi.c #define ERR_SYN_ESC_EDGE(l) BIT(4 + (l)) l 143 drivers/gpu/drm/bridge/cdns-dsi.c #define ERR_ESC_EDGE(l) BIT(0 + (l)) l 154 drivers/gpu/drm/bridge/cdns-dsi.c #define DATA_LANE_STATE(l, val) \ l 155 drivers/gpu/drm/bridge/cdns-dsi.c (((val) >> (2 + 2 * (l) + ((l) ? 1 : 0))) & GENMASK((l) ? 
1 : 2, 0)) l 394 drivers/gpu/drm/bridge/cdns-dsi.c #define DAT_REMAP_CFG(b, l) ((l) << ((b) * 8)) l 69 drivers/gpu/drm/bridge/sii902x.c #define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK) l 58 drivers/gpu/drm/drm_dp_helper.c u8 l = dp_link_status(link_status, i); l 59 drivers/gpu/drm/drm_dp_helper.c return (l >> s) & 0xf; l 104 drivers/gpu/drm/drm_dp_helper.c u8 l = dp_link_status(link_status, i); l 106 drivers/gpu/drm/drm_dp_helper.c return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; l 117 drivers/gpu/drm/drm_dp_helper.c u8 l = dp_link_status(link_status, i); l 119 drivers/gpu/drm/drm_dp_helper.c return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; l 433 drivers/gpu/drm/exynos/exynos_drm_ipp.c const struct drm_exynos_ipp_limit *l = limits; l 438 drivers/gpu/drm/exynos/exynos_drm_ipp.c for (l = limits; l - limits < num_limits; l++) { l 439 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) != l 441 drivers/gpu/drm/exynos/exynos_drm_ipp.c ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) != l 444 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->h.min, l->h.min); l 445 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->h.max, l->h.max); l 446 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->h.align, l->h.align); l 447 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->v.min, l->v.min); l 448 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->v.max, l->v.max); l 449 drivers/gpu/drm/exynos/exynos_drm_ipp.c __limit_set_val(&res->v.align, l->v.align); l 464 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct drm_exynos_ipp_limit_val *l) l 466 drivers/gpu/drm/exynos/exynos_drm_ipp.c if ((l->min && val < l->min) || (l->max && val > l->max)) { l 468 drivers/gpu/drm/exynos/exynos_drm_ipp.c val, l->min, l->max); l 471 drivers/gpu/drm/exynos/exynos_drm_ipp.c return __align_check(val, l->align); l 479 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct drm_ipp_limit l; l 480 drivers/gpu/drm/exynos/exynos_drm_ipp.c struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; l 486 drivers/gpu/drm/exynos/exynos_drm_ipp.c __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); l 487 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (!__size_limit_check(real_width, &l.h) || l 488 drivers/gpu/drm/exynos/exynos_drm_ipp.c !__size_limit_check(buf->buf.height, &l.v)) l 492 drivers/gpu/drm/exynos/exynos_drm_ipp.c lv = &l.h; l 493 drivers/gpu/drm/exynos/exynos_drm_ipp.c lh = &l.v; l 495 drivers/gpu/drm/exynos/exynos_drm_ipp.c __get_size_limit(limits, num_limits, id, &l); l 112 drivers/gpu/drm/exynos/exynos_drm_ipp.h #define IPP_SRCDST_MFORMAT(f, m, l) \ l 113 drivers/gpu/drm/exynos/exynos_drm_ipp.h .fourcc = DRM_FORMAT_##f, .modifier = m, .limits = l, \ l 114 drivers/gpu/drm/exynos/exynos_drm_ipp.h .num_limits = ARRAY_SIZE(l), \ l 118 drivers/gpu/drm/exynos/exynos_drm_ipp.h #define IPP_SRCDST_FORMAT(f, l) IPP_SRCDST_MFORMAT(f, 0, l) l 120 drivers/gpu/drm/exynos/exynos_drm_ipp.h #define IPP_SIZE_LIMIT(l, val...) 
\ l 122 drivers/gpu/drm/exynos/exynos_drm_ipp.h DRM_EXYNOS_IPP_LIMIT_SIZE_##l), val l 623 drivers/gpu/drm/exynos/exynos_drm_scaler.c #define IPP_SRCDST_TILE_FORMAT(f, l) \ l 624 drivers/gpu/drm/exynos/exynos_drm_scaler.c IPP_SRCDST_MFORMAT(f, DRM_FORMAT_MOD_SAMSUNG_16_16_TILE, (l)) l 1256 drivers/gpu/drm/gma500/cdv_intel_dp.c uint8_t l = cdv_intel_dp_link_status(link_status, i); l 1258 drivers/gpu/drm/gma500/cdv_intel_dp.c return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; l 1269 drivers/gpu/drm/gma500/cdv_intel_dp.c uint8_t l = cdv_intel_dp_link_status(link_status, i); l 1271 drivers/gpu/drm/gma500/cdv_intel_dp.c return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; l 1340 drivers/gpu/drm/gma500/cdv_intel_dp.c uint8_t l = cdv_intel_dp_link_status(link_status, i); l 1342 drivers/gpu/drm/gma500/cdv_intel_dp.c return (l >> s) & 0xf; l 275 drivers/gpu/drm/i915/intel_uncore.h __raw_read(32, l) l 280 drivers/gpu/drm/i915/intel_uncore.h __raw_write(32, l) l 302 drivers/gpu/drm/i915/intel_uncore.h __uncore_read(read, 32, l, true) l 304 drivers/gpu/drm/i915/intel_uncore.h __uncore_read(read_notrace, 32, l, false) l 308 drivers/gpu/drm/i915/intel_uncore.h __uncore_write(write, 32, l, true) l 309 drivers/gpu/drm/i915/intel_uncore.h __uncore_write(write_notrace, 32, l, false) l 954 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c long i, l, datalen = 0; l 968 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c l = ascii85_encode_len(datalen); l 970 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c for (i = 0; i < l; i++) l 628 drivers/gpu/drm/msm/adreno/adreno_gpu.c long l; l 634 drivers/gpu/drm/msm/adreno/adreno_gpu.c l = ascii85_encode_len(len); l 640 drivers/gpu/drm/msm/adreno/adreno_gpu.c buffer_size = (l * 5) + 1; l 646 drivers/gpu/drm/msm/adreno/adreno_gpu.c for (i = 0; i < l; i++) l 37 drivers/gpu/drm/nouveau/include/nvkm/core/client.h #define nvif_printk(o,l,p,f,a...) do { \ l 40 drivers/gpu/drm/nouveau/include/nvkm/core/client.h if (_client->debug >= NV_DBG_##l) \ l 283 drivers/gpu/drm/nouveau/include/nvkm/core/device.h #define nvdev_printk_(d,l,p,f,a...) do { \ l 285 drivers/gpu/drm/nouveau/include/nvkm/core/device.h if (_device->debug >= (l)) \ l 288 drivers/gpu/drm/nouveau/include/nvkm/core/device.h #define nvdev_printk(d,l,p,f,a...) nvdev_printk_((d), NV_DBG_##l, p, f, ##a) l 37 drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h #define nvkm_printk_(s,l,p,f,a...) do { \ l 39 drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) { \ l 44 drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h #define nvkm_printk(s,l,p,f,a...) nvkm_printk_((s), NV_DBG_##l, p, f, ##a) l 329 drivers/gpu/drm/nouveau/nouveau_display.c struct nouveau_drm_prop_enum_list *l = (list); \ l 331 drivers/gpu/drm/nouveau/nouveau_display.c while (l->gen_mask) { \ l 332 drivers/gpu/drm/nouveau/nouveau_display.c if (l->gen_mask & (1 << (gen))) \ l 334 drivers/gpu/drm/nouveau/nouveau_display.c l++; \ l 338 drivers/gpu/drm/nouveau/nouveau_display.c l = (list); \ l 339 drivers/gpu/drm/nouveau/nouveau_display.c while (p && l->gen_mask) { \ l 340 drivers/gpu/drm/nouveau/nouveau_display.c if (l->gen_mask & (1 << (gen))) { \ l 341 drivers/gpu/drm/nouveau/nouveau_display.c drm_property_add_enum(p, l->type, l->name); \ l 343 drivers/gpu/drm/nouveau/nouveau_display.c l++; \ l 242 drivers/gpu/drm/nouveau/nouveau_drv.h #define NV_PRINTK(l,c,f,a...) 
do { \ l 244 drivers/gpu/drm/nouveau/nouveau_drv.h dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a); \ l 261 drivers/gpu/drm/nouveau/nouveau_drv.h #define NV_PRINTK_ONCE(l,c,f,a...) NV_PRINTK(l##_once,c,f, ##a) l 836 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_SOR_DP_CTRL(i, l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) l 848 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_SOR_DP_UNK118(i, l) (0x0061c118 + (i) * 0x800 + (l) * 0x80) l 849 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_SOR_DP_UNK120(i, l) (0x0061c120 + (i) * 0x800 + (l) * 0x80) l 850 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_SOR_DP_SCFG(i, l) (0x0061c128 + (i) * 0x800 + (l) * 0x80) l 851 drivers/gpu/drm/nouveau/nouveau_reg.h #define NV50_SOR_DP_UNK130(i, l) (0x0061c130 + (i) * 0x800 + (l) * 0x80) l 26 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h #define CONN_MSG(c,l,f,a...) do { \ l 28 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h nvkm_##l(&_conn->disp->engine.subdev, "conn %02x:%02x%02x: "f"\n", \ l 54 drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h #define DPCD_LC03(l) ((l) + 0x00103) l 47 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h #define HEAD_MSG(h,l,f,a...) do { \ l 49 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h nvkm_##l(&_h->disp->engine.subdev, "head-%d: "f"\n", _h->id, ##a); \ l 166 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h #define IOR_MSG(i,l,f,a...) do { \ l 168 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h nvkm_##l(&_ior->disp->engine.subdev, "%s: "f"\n", _ior->name, ##a); \ l 187 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c const u8 l = ffs(outp->info.link); l 189 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c const u16 m = (0x0100 << head->id) | (l << 6) | outp->info.or; l 48 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h #define OUTP_MSG(o,l,f,a...) do { \ l 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h nvkm_##l(&_outp->disp->engine.subdev, "outp %02x:%04x:%04x: "f"\n", \ l 78 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c calc_ref(struct nvkm_fb *fb, int l, int k, int i) l 83 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c int m = (l >> (8 * i) & 0xff) + calc_bias(fb, k, i, j); l 103 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c int l = nvkm_rd32(device, 0x1003d0); l 108 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c calc_ref(fb, l, 0, j)); l 112 drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c calc_ref(fb, l, 1, j)); l 175 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h #define ram_train_result(s,r,l) ramfuc_train_result((s), (r), (l)) l 32 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h #define AUX_MSG(b,l,f,a...) do { \ l 34 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h nvkm_##l(&_aux->pad->i2c->subdev, "aux %04x: "f"\n", _aux->id, ##a); \ l 32 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h #define BUS_MSG(b,l,f,a...) do { \ l 34 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h nvkm_##l(&_bus->pad->i2c->subdev, "bus %04x: "f"\n", _bus->id, ##a); \ l 61 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h #define PAD_MSG(p,l,f,a...) do { \ l 63 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h nvkm_##l(&_pad->i2c->subdev, "pad %04x: "f"\n", _pad->id, ##a); \ l 277 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h #define VMM_PRINT(l,v,p,f,a...) 
do { \ l 279 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) { \ l 384 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c struct list_head *l; l 389 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c list_for_each(l, imgs) l 240 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c struct list_head *l; l 245 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c list_for_each(l, imgs) l 1834 drivers/gpu/drm/omapdrm/dss/dispc.c u32 l; l 1839 drivers/gpu/drm/omapdrm/dss/dispc.c l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane)); l 1842 drivers/gpu/drm/omapdrm/dss/dispc.c l &= ~((0x3 << 5) | (0x1 << 21)); l 1843 drivers/gpu/drm/omapdrm/dss/dispc.c l |= (orig_width != out_width) ? (1 << 5) : 0; l 1844 drivers/gpu/drm/omapdrm/dss/dispc.c l |= (orig_height != out_height) ? (1 << 6) : 0; l 1845 drivers/gpu/drm/omapdrm/dss/dispc.c l |= five_taps ? (1 << 21) : 0; l 1849 drivers/gpu/drm/omapdrm/dss/dispc.c l &= ~(0x3 << 7); l 1850 drivers/gpu/drm/omapdrm/dss/dispc.c l |= (orig_width <= out_width) ? 0 : (1 << 7); l 1851 drivers/gpu/drm/omapdrm/dss/dispc.c l |= (orig_height <= out_height) ? 0 : (1 << 8); l 1856 drivers/gpu/drm/omapdrm/dss/dispc.c l &= ~(0x1 << 22); l 1857 drivers/gpu/drm/omapdrm/dss/dispc.c l |= five_taps ? (1 << 22) : 0; l 1860 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l); l 2795 drivers/gpu/drm/omapdrm/dss/dispc.c u32 l; l 2838 drivers/gpu/drm/omapdrm/dss/dispc.c l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane)); l 2839 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */ l 2840 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */ l 2841 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */ l 2843 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */ l 2845 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */ l 2846 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l); l 3014 drivers/gpu/drm/omapdrm/dss/dispc.c u32 l; l 3035 drivers/gpu/drm/omapdrm/dss/dispc.c l = dispc_read_reg(dispc, DISPC_CONTROL); l 3036 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, gpout0, 15, 15); l 3037 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, gpout1, 16, 16); l 3038 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_CONTROL, l); l 3125 drivers/gpu/drm/omapdrm/dss/dispc.c u32 timing_h, timing_v, l; l 3166 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_VAL(onoff, 17, 17) | l 3175 drivers/gpu/drm/omapdrm/dss/dispc.c l |= (1 << 18); l 3177 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_POL_FREQ(channel), l); l 3278 drivers/gpu/drm/omapdrm/dss/dispc.c u32 l; l 3279 drivers/gpu/drm/omapdrm/dss/dispc.c l = dispc_read_reg(dispc, DISPC_DIVISORo(channel)); l 3280 drivers/gpu/drm/omapdrm/dss/dispc.c *lck_div = FLD_GET(l, 23, 16); l 3281 drivers/gpu/drm/omapdrm/dss/dispc.c *pck_div = FLD_GET(l, 7, 0); l 3343 drivers/gpu/drm/omapdrm/dss/dispc.c u32 l; l 3345 drivers/gpu/drm/omapdrm/dss/dispc.c l = dispc_read_reg(dispc, DISPC_DIVISORo(channel)); l 3347 drivers/gpu/drm/omapdrm/dss/dispc.c pcd = FLD_GET(l, 7, 0); l 3419 drivers/gpu/drm/omapdrm/dss/dispc.c u32 l; l 3434 drivers/gpu/drm/omapdrm/dss/dispc.c l = dispc_read_reg(dispc, DISPC_DIVISOR); l 3435 drivers/gpu/drm/omapdrm/dss/dispc.c lcd = FLD_GET(l, 23, 16); l 3924 drivers/gpu/drm/omapdrm/dss/dispc.c u32 l; 
l 3928 drivers/gpu/drm/omapdrm/dss/dispc.c l = dispc_read_reg(dispc, DISPC_DIVISOR); l 3930 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, 1, 0, 0); l 3931 drivers/gpu/drm/omapdrm/dss/dispc.c l = FLD_MOD(l, 1, 23, 16); l 3932 drivers/gpu/drm/omapdrm/dss/dispc.c dispc_write_reg(dispc, DISPC_DIVISOR, l); l 1131 drivers/gpu/drm/omapdrm/dss/dsi.c u32 l; l 1137 drivers/gpu/drm/omapdrm/dss/dsi.c l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5); l 1826 drivers/gpu/drm/omapdrm/dss/dsi.c u32 l; l 1829 drivers/gpu/drm/omapdrm/dss/dsi.c l = 0; l 1835 drivers/gpu/drm/omapdrm/dss/dsi.c l |= 1 << (i * 2 + (p ? 0 : 1)); l 1838 drivers/gpu/drm/omapdrm/dss/dsi.c l |= 1 << (i * 2 + (p ? 1 : 0)); l 1853 drivers/gpu/drm/omapdrm/dss/dsi.c REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17); l 1888 drivers/gpu/drm/omapdrm/dss/dsi.c u32 l; l 1891 drivers/gpu/drm/omapdrm/dss/dsi.c l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5); l 1895 drivers/gpu/drm/omapdrm/dss/dsi.c if (!in_use[i] || (l & (1 << offsets[i]))) l 1904 drivers/gpu/drm/omapdrm/dss/dsi.c if (!in_use[i] || (l & (1 << offsets[i]))) l 2011 drivers/gpu/drm/omapdrm/dss/dsi.c u32 l; l 2037 drivers/gpu/drm/omapdrm/dss/dsi.c l = dsi_read_reg(dsi, DSI_TIMING1); l 2038 drivers/gpu/drm/omapdrm/dss/dsi.c l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ l 2039 drivers/gpu/drm/omapdrm/dss/dsi.c l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */ l 2040 drivers/gpu/drm/omapdrm/dss/dsi.c l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */ l 2041 drivers/gpu/drm/omapdrm/dss/dsi.c l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ l 2042 drivers/gpu/drm/omapdrm/dss/dsi.c dsi_write_reg(dsi, DSI_TIMING1, l); l 3817 drivers/gpu/drm/omapdrm/dss/dsi.c u32 l; l 3846 drivers/gpu/drm/omapdrm/dss/dsi.c l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ l 3847 drivers/gpu/drm/omapdrm/dss/dsi.c dsi_write_reg(dsi, DSI_VC_TE(channel), l); l 3853 drivers/gpu/drm/omapdrm/dss/dsi.c l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ l 3855 drivers/gpu/drm/omapdrm/dss/dsi.c l = FLD_MOD(l, 1, 31, 31); /* TE_START */ l 3856 drivers/gpu/drm/omapdrm/dss/dsi.c dsi_write_reg(dsi, DSI_VC_TE(channel), l); l 245 drivers/gpu/drm/omapdrm/dss/dss.c u32 l; l 249 drivers/gpu/drm/omapdrm/dss/dss.c l = dss_read_reg(dss, DSS_SDI_CONTROL); l 250 drivers/gpu/drm/omapdrm/dss/dss.c l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */ l 251 drivers/gpu/drm/omapdrm/dss/dss.c l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */ l 252 drivers/gpu/drm/omapdrm/dss/dss.c l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */ l 253 drivers/gpu/drm/omapdrm/dss/dss.c dss_write_reg(dss, DSS_SDI_CONTROL, l); l 255 drivers/gpu/drm/omapdrm/dss/dss.c l = dss_read_reg(dss, DSS_PLL_CONTROL); l 256 drivers/gpu/drm/omapdrm/dss/dss.c l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */ l 257 drivers/gpu/drm/omapdrm/dss/dss.c l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */ l 258 drivers/gpu/drm/omapdrm/dss/dss.c l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */ l 259 drivers/gpu/drm/omapdrm/dss/dss.c dss_write_reg(dss, DSS_PLL_CONTROL, l); l 699 drivers/gpu/drm/omapdrm/dss/dss.c int l = 0; l 702 drivers/gpu/drm/omapdrm/dss/dss.c l = 0; l 704 drivers/gpu/drm/omapdrm/dss/dss.c l = 1; l 709 drivers/gpu/drm/omapdrm/dss/dss.c REG_FLD_MOD(dss, DSS_CONTROL, l, 6, 6); l 161 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c int r, l; l 174 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c l = 128; l 180 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c l += 128; l 183 drivers/gpu/drm/omapdrm/dss/hdmi4_core.c return l; l 133 drivers/gpu/drm/omapdrm/dss/hdmi_wp.c u32 l = 0; l 138 
drivers/gpu/drm/omapdrm/dss/hdmi_wp.c l |= FLD_VAL(video_fmt->y_res, 31, 16); l 139 drivers/gpu/drm/omapdrm/dss/hdmi_wp.c l |= FLD_VAL(video_fmt->x_res, 15, 0); l 140 drivers/gpu/drm/omapdrm/dss/hdmi_wp.c hdmi_write_reg(wp->base, HDMI_WP_VIDEO_SIZE, l); l 35 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c int l = 0, total = 0; l 38 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c for (i = 0; total < prop->length; total += l, p += l, i++) l 39 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c l = strlen(p) + 1; l 66 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c size_t l = strlen(src) + 1; l 72 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c dst += l; l 74 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c src += l; l 75 drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c total += l; l 396 drivers/gpu/drm/omapdrm/dss/pll.c u32 l; l 398 drivers/gpu/drm/omapdrm/dss/pll.c l = 0; l 400 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 1, 0, 0); /* PLL_STOPMODE */ l 401 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb); /* PLL_REGN */ l 402 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->m, hw->m_msb, hw->m_lsb); /* PLL_REGM */ l 404 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[0] ? cinfo->mX[0] - 1 : 0, l 407 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[1] ? cinfo->mX[1] - 1 : 0, l 409 drivers/gpu/drm/omapdrm/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION1); l 411 drivers/gpu/drm/omapdrm/dss/pll.c l = 0; l 413 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[2] ? cinfo->mX[2] - 1 : 0, l 416 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[3] ? cinfo->mX[3] - 1 : 0, l 418 drivers/gpu/drm/omapdrm/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION3); l 420 drivers/gpu/drm/omapdrm/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION2); l 428 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, f, 4, 1); /* PLL_FREQSEL */ l 432 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */ l 434 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 1, 13, 13); /* PLL_REFEN */ l 435 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0, 14, 14); /* PHY_CLKINEN */ l 436 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0, 16, 16); /* M4_CLOCK_EN */ l 437 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0, 18, 18); /* M5_CLOCK_EN */ l 438 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 1, 20, 20); /* HSDIVBYPASS */ l 440 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 3, 22, 21); /* REFSEL = sysclk */ l 441 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0, 23, 23); /* M6_CLOCK_EN */ l 442 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */ l 443 drivers/gpu/drm/omapdrm/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION2); l 466 drivers/gpu/drm/omapdrm/dss/pll.c l = readl_relaxed(base + PLL_STATUS); l 468 drivers/gpu/drm/omapdrm/dss/pll.c if (pll_is_locked(l) && l 495 drivers/gpu/drm/omapdrm/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION2); l 496 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 1, 14, 14); /* PHY_CLKINEN */ l 497 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[0] ? 1 : 0, 16, 16); /* M4_CLOCK_EN */ l 498 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[1] ? 1 : 0, 18, 18); /* M5_CLOCK_EN */ l 499 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0, 20, 20); /* HSDIVBYPASS */ l 500 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[2] ? 
1 : 0, 23, 23); /* M6_CLOCK_EN */ l 501 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[3] ? 1 : 0, 25, 25); /* M7_CLOCK_EN */ l 502 drivers/gpu/drm/omapdrm/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION2); l 523 drivers/gpu/drm/omapdrm/dss/pll.c u32 l; l 525 drivers/gpu/drm/omapdrm/dss/pll.c l = 0; l 526 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->m, 20, 9); /* PLL_REGM */ l 527 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->n - 1, 8, 1); /* PLL_REGN */ l 528 drivers/gpu/drm/omapdrm/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION1); l 530 drivers/gpu/drm/omapdrm/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION2); l 531 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */ l 532 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0x1, 13, 13); /* PLL_REFEN */ l 533 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0x0, 14, 14); /* PHY_CLKINEN */ l 535 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0x3, 22, 21); /* REFSEL = SYSCLK */ l 539 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0x4, 3, 1); l 541 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, 0x2, 3, 1); l 542 drivers/gpu/drm/omapdrm/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION2); l 544 drivers/gpu/drm/omapdrm/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION3); l 545 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->sd, 17, 10); /* PLL_REGSD */ l 546 drivers/gpu/drm/omapdrm/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION3); l 548 drivers/gpu/drm/omapdrm/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION4); l 549 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mX[0], 24, 18); /* PLL_REGM2 */ l 550 drivers/gpu/drm/omapdrm/dss/pll.c l = FLD_MOD(l, cinfo->mf, 17, 0); /* PLL_REGM_F */ l 551 drivers/gpu/drm/omapdrm/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION4); l 317 drivers/gpu/drm/omapdrm/dss/venc.c u32 l = __raw_readl(venc->base + idx); l 318 drivers/gpu/drm/omapdrm/dss/venc.c return l; l 422 drivers/gpu/drm/omapdrm/dss/venc.c u32 l; l 435 drivers/gpu/drm/omapdrm/dss/venc.c l = 0; l 438 drivers/gpu/drm/omapdrm/dss/venc.c l |= 1 << 1; l 440 drivers/gpu/drm/omapdrm/dss/venc.c l |= (1 << 0) | (1 << 2); l 443 drivers/gpu/drm/omapdrm/dss/venc.c l |= 1 << 3; l 445 drivers/gpu/drm/omapdrm/dss/venc.c venc_write_reg(venc, VENC_OUTPUT_CONTROL, l); l 2728 drivers/gpu/drm/radeon/radeon.h #define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l)) l 2745 drivers/gpu/drm/radeon/radeon.h #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) l 2780 drivers/gpu/drm/radeon/radeon.h #define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l)) l 2781 drivers/gpu/drm/radeon/radeon.h #define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l)) l 2784 drivers/gpu/drm/radeon/radeon.h #define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l)) l 1002 drivers/gpu/drm/radeon/rv6xx_dpm.c int d_l, int d_r, u8 *l, u8 *r) l 1013 drivers/gpu/drm/radeon/rv6xx_dpm.c *l = d_l - h_r * a_n / a_d; l 264 drivers/gpu/drm/radeon/rv770_dpm.c u8 l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; l 268 drivers/gpu/drm/radeon/rv770_dpm.c l[0] = 0; l 276 drivers/gpu/drm/radeon/rv770_dpm.c l[1] = (u8)(pi->lmp - (int)pi->lmp * a_n / a_d); l 284 drivers/gpu/drm/radeon/rv770_dpm.c l[2] = (u8)(pi->lhp - (int)pi->lhp * a_n / a_d); l 288 drivers/gpu/drm/radeon/rv770_dpm.c a_t = CG_R(r[i] * pi->bsp / 200) | CG_L(l[i] * pi->bsp 
/ 200); l 293 drivers/gpu/drm/radeon/rv770_dpm.c CG_L(l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200); l 394 drivers/gpu/drm/radeon/sumo_dpm.c u32 l[SUMO_MAX_HARDWARE_POWERLEVELS]; l 402 drivers/gpu/drm/radeon/sumo_dpm.c l[0] = SUMO_L_DFLT0; l 403 drivers/gpu/drm/radeon/sumo_dpm.c l[1] = SUMO_L_DFLT1; l 404 drivers/gpu/drm/radeon/sumo_dpm.c l[2] = SUMO_L_DFLT2; l 405 drivers/gpu/drm/radeon/sumo_dpm.c l[3] = SUMO_L_DFLT3; l 406 drivers/gpu/drm/radeon/sumo_dpm.c l[4] = SUMO_L_DFLT4; l 413 drivers/gpu/drm/radeon/sumo_dpm.c a_t = CG_R(m_a * r[i] / 100) | CG_L(m_a * l[i] / 100); l 424 drivers/gpu/drm/radeon/sumo_dpm.c CG_L(m_a * l[ps->num_levels - 1] / 100); l 445 drivers/gpu/drm/savage/savage_drv.h #define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF) l 446 drivers/gpu/drm/savage/savage_drv.h #define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF) l 29 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_MODCTL_LAY_EN(l) BIT(8 + l) l 43 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_LAYSIZE_REG(l) (0x810 + (0x4 * (l))) l 47 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_LAYCOOR_REG(l) (0x820 + (0x4 * (l))) l 51 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_LAYLINEWIDTH_REG(l) (0x840 + (0x4 * (l))) l 53 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_LAYFB_L32ADD_REG(l) (0x850 + (0x4 * (l))) l 56 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l) GENMASK(3 + ((l) * 8), (l) * 8) l 57 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_LAYFB_H4ADD(l, val) ((val) << ((l) * 8)) l 66 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_ATTCTL_REG0(l) (0x890 + (0x4 * (l))) l 77 drivers/gpu/drm/sun4i/sun4i_backend.h #define SUN4I_BACKEND_ATTCTL_REG1(l) (0x8a0 + (0x4 * (l))) l 705 drivers/gpu/vga/vgaarb.c u16 l; l 707 drivers/gpu/vga/vgaarb.c pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l); l 708 drivers/gpu/vga/vgaarb.c if (!(l & PCI_BRIDGE_CTL_VGA)) { l 161 drivers/hsi/clients/hsi_char.c static void hsc_reset_list(struct hsc_channel *channel, struct list_head *l) l 167 drivers/hsi/clients/hsi_char.c list_splice_init(l, &list); l 514 drivers/hwmon/lm90.c int oldh, newh, l; l 532 drivers/hwmon/lm90.c l = lm90_read_reg(client, regl); l 533 drivers/hwmon/lm90.c if (l < 0) l 534 drivers/hwmon/lm90.c return l; l 539 drivers/hwmon/lm90.c l = lm90_read_reg(client, regl); l 540 drivers/hwmon/lm90.c if (l < 0) l 541 drivers/hwmon/lm90.c return l; l 543 drivers/hwmon/lm90.c return (newh << 8) | l; l 36 drivers/hwmon/max16065.c #define MAX16065_LIMIT(l, x) (0x48 + (l) + (x) * 3) /* l 1183 drivers/hwmon/pmbus/pmbus_core.c const struct pmbus_limit_attr *l = attr->limit; l 1190 drivers/hwmon/pmbus/pmbus_core.c if (pmbus_check_word_register(client, page, l->reg)) { l 1191 drivers/hwmon/pmbus/pmbus_core.c curr = pmbus_add_sensor(data, name, l->attr, index, l 1192 drivers/hwmon/pmbus/pmbus_core.c page, l->reg, attr->class, l 1193 drivers/hwmon/pmbus/pmbus_core.c attr->update || l->update, l 1197 drivers/hwmon/pmbus/pmbus_core.c if (l->sbit && (info->func[page] & attr->sfunc)) { l 1199 drivers/hwmon/pmbus/pmbus_core.c l->alarm, index, l 1200 drivers/hwmon/pmbus/pmbus_core.c attr->compare ? l->low ? curr : base l 1202 drivers/hwmon/pmbus/pmbus_core.c attr->compare ? l->low ? 
base : curr l 1204 drivers/hwmon/pmbus/pmbus_core.c attr->sbase + page, l->sbit); l 1210 drivers/hwmon/pmbus/pmbus_core.c l++; l 53 drivers/hwmon/pmbus/zl6100.c static long zl6100_l2d(s16 l) l 59 drivers/hwmon/pmbus/zl6100.c exponent = l >> 11; l 60 drivers/hwmon/pmbus/zl6100.c mantissa = ((s16)((l & 0x7ff) << 5)) >> 5; l 394 drivers/i2c/busses/i2c-stm32f7.c u16 p, l, a, h; l 453 drivers/i2c/busses/i2c-stm32f7.c for (l = 0; l < STM32F7_SCLDEL_MAX; l++) { l 454 drivers/i2c/busses/i2c-stm32f7.c u32 scldel = (l + 1) * (p + 1) * i2cclk; l 472 drivers/i2c/busses/i2c-stm32f7.c v->scldel = l; l 511 drivers/i2c/busses/i2c-stm32f7.c for (l = 0; l < STM32F7_SCLL_MAX; l++) { l 512 drivers/i2c/busses/i2c-stm32f7.c u32 tscl_l = (l + 1) * prescaler + tsync; l 535 drivers/i2c/busses/i2c-stm32f7.c v->scll = l; l 215 drivers/i3c/master/i3c-master-cdns.c #define CMD0_FIFO_PL_LEN(l) ((l) << 12) l 228 drivers/i3c/master/i3c-master-cdns.c #define IMD_CMD0_PL_LEN(l) ((l) << 12) l 310 drivers/i3c/master/i3c-master-cdns.c #define SIR_MAP_DEV_PL(l) ((l) << 8) l 99 drivers/ide/ide-floppy_ioctl.c u8 *buf, int b, int l, l 115 drivers/ide/ide-floppy_ioctl.c put_unaligned(cpu_to_be32(l), (unsigned int *)(&buf[8])); l 57 drivers/ide/ide-scan-pci.c struct list_head *l; l 61 drivers/ide/ide-scan-pci.c list_for_each(l, &ide_pci_drivers) { l 62 drivers/ide/ide-scan-pci.c d = list_entry(l, struct pci_driver, node); l 93 drivers/ide/ide-scan-pci.c struct list_head *l, *n; l 104 drivers/ide/ide-scan-pci.c list_for_each_safe(l, n, &ide_pci_drivers) { l 105 drivers/ide/ide-scan-pci.c list_del(l); l 106 drivers/ide/ide-scan-pci.c d = list_entry(l, struct pci_driver, node); l 789 drivers/iio/industrialio-buffer.c struct list_head l; l 795 drivers/iio/industrialio-buffer.c list_for_each_entry_safe(p, q, &buffer->demux_list, l) { l 796 drivers/iio/industrialio-buffer.c list_del(&p->l); l 816 drivers/iio/industrialio-buffer.c list_add_tail(&(*p)->l, &buffer->demux_list); l 1327 drivers/iio/industrialio-buffer.c list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l) l 1376 drivers/iio/industrialio-buffer.c list_for_each_entry(t, &buffer->demux_list, l) l 590 drivers/iio/industrialio-core.c int l = 0; l 593 drivers/iio/industrialio-core.c l += snprintf(&buf[l], len - l, "%d ", vals[i]); l 594 drivers/iio/industrialio-core.c if (l >= len) l 597 drivers/iio/industrialio-core.c return l; l 1036 drivers/iio/industrialio-core.c list_for_each_entry(t, attr_list, l) l 1045 drivers/iio/industrialio-core.c list_add(&iio_attr->l, attr_list); l 1224 drivers/iio/industrialio-core.c list_for_each_entry_safe(p, n, attr_list, l) { l 1226 drivers/iio/industrialio-core.c list_del(&p->l); l 1375 drivers/iio/industrialio-core.c list_for_each_entry(p, &indio_dev->channel_attr_list, l) l 518 drivers/iio/industrialio-event.c l) l 21 drivers/iio/inkern.c struct list_head l; l 44 drivers/iio/inkern.c list_add_tail(&mapi->l, &iio_map_list); l 64 drivers/iio/inkern.c list_for_each_entry_safe(mapi, next, &iio_map_list, l) { l 66 drivers/iio/inkern.c list_del(&mapi->l); l 289 drivers/iio/inkern.c list_for_each_entry(c_i, &iio_map_list, l) { l 424 drivers/iio/inkern.c list_for_each_entry(c, &iio_map_list, l) l 443 drivers/iio/inkern.c list_for_each_entry(c, &iio_map_list, l) { l 20 drivers/iio/trigger/iio-trig-sysfs.c struct list_head l; l 137 drivers/iio/trigger/iio-trig-sysfs.c list_for_each_entry(t, &iio_sysfs_trig_list, l) l 168 drivers/iio/trigger/iio-trig-sysfs.c list_add(&t->l, &iio_sysfs_trig_list); l 188 drivers/iio/trigger/iio-trig-sysfs.c 
list_for_each_entry(t, &iio_sysfs_trig_list, l) l 201 drivers/iio/trigger/iio-trig-sysfs.c list_del(&t->l); l 277 drivers/infiniband/hw/hfi1/driver.c packet->ohdr = &rhdr->u.l.oth; l 278 drivers/infiniband/hw/hfi1/driver.c packet->grh = &rhdr->u.l.grh; l 650 drivers/infiniband/hw/hfi1/driver.c packet->ohdr = &hdr->u.l.oth; l 651 drivers/infiniband/hw/hfi1/driver.c packet->grh = &hdr->u.l.grh; l 1463 drivers/infiniband/hw/hfi1/driver.c packet->ohdr = &hdr->u.l.oth; l 1464 drivers/infiniband/hw/hfi1/driver.c packet->grh = &hdr->u.l.grh; l 1789 drivers/infiniband/hw/hfi1/driver.c packet.ohdr = &hdr->u.l.oth; l 559 drivers/infiniband/hw/hfi1/hfi.h ohdr = &hdr->u.l.oth; l 568 drivers/infiniband/hw/hfi1/hfi.h ohdr = &hdr_16b->u.l.oth; l 149 drivers/infiniband/hw/hfi1/qp.c static void flush_list_head(struct list_head *l) l 151 drivers/infiniband/hw/hfi1/qp.c while (!list_empty(l)) { l 155 drivers/infiniband/hw/hfi1/qp.c l, l 460 drivers/infiniband/hw/hfi1/rc.c ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; l 468 drivers/infiniband/hw/hfi1/rc.c ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; l 1282 drivers/infiniband/hw/hfi1/rc.c *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh, l 1285 drivers/infiniband/hw/hfi1/rc.c ohdr = &hdr->u.l.oth; l 1340 drivers/infiniband/hw/hfi1/rc.c *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh, l 1343 drivers/infiniband/hw/hfi1/rc.c ohdr = &hdr->u.l.oth; l 302 drivers/infiniband/hw/hfi1/ruc.c grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh; l 375 drivers/infiniband/hw/hfi1/ruc.c struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh; l 59 drivers/infiniband/hw/hfi1/trace.c ohdr = &hdr->u.l.oth; l 79 drivers/infiniband/hw/hfi1/trace.c ohdr = &hdr->u.l.oth; l 382 drivers/infiniband/hw/hfi1/trace_ibhdrs.h ohdr = &opah->opah.u.l.oth; l 406 drivers/infiniband/hw/hfi1/trace_ibhdrs.h ohdr = &opah->ibh.u.l.oth; l 99 drivers/infiniband/hw/hfi1/uc.c ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; l 107 drivers/infiniband/hw/hfi1/uc.c ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; l 329 drivers/infiniband/hw/hfi1/ud.c grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh; l 335 drivers/infiniband/hw/hfi1/ud.c ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth; l 426 drivers/infiniband/hw/hfi1/ud.c grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh; l 431 drivers/infiniband/hw/hfi1/ud.c ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth; l 652 drivers/infiniband/hw/hfi1/ud.c struct ib_grh *grh = &hdr.opah.u.l.grh; l 660 drivers/infiniband/hw/hfi1/ud.c ohdr = &hdr.opah.u.l.oth; l 710 drivers/infiniband/hw/hfi1/ud.c struct ib_grh *grh = &hdr.ibh.u.l.grh; l 718 drivers/infiniband/hw/hfi1/ud.c ohdr = &hdr.ibh.u.l.oth; l 1273 drivers/infiniband/hw/hfi1/verbs.c ohdr = &hdr->u.l.oth; l 1282 drivers/infiniband/hw/hfi1/verbs.c ohdr = &hdr->u.l.oth; l 127 drivers/infiniband/hw/hfi1/verbs.h } l; l 292 drivers/infiniband/hw/qib/qib_driver.c u32 ctxt, u32 eflags, u32 l, u32 etail, l 331 drivers/infiniband/hw/qib/qib_driver.c ohdr = &hdr->u.l.oth; l 332 drivers/infiniband/hw/qib/qib_driver.c if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) l 334 drivers/infiniband/hw/qib/qib_driver.c vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); l 448 drivers/infiniband/hw/qib/qib_driver.c u32 etail = -1, l, hdrqtail; l 455 drivers/infiniband/hw/qib/qib_driver.c l = rcd->head; l 456 drivers/infiniband/hw/qib/qib_driver.c rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; l 465 drivers/infiniband/hw/qib/qib_driver.c if (l == hdrqtail) l 508 drivers/infiniband/hw/qib/qib_driver.c crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l, l 518 
drivers/infiniband/hw/qib/qib_driver.c l += rsize; l 519 drivers/infiniband/hw/qib/qib_driver.c if (l >= maxcnt) l 520 drivers/infiniband/hw/qib/qib_driver.c l = 0; l 524 drivers/infiniband/hw/qib/qib_driver.c rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; l 532 drivers/infiniband/hw/qib/qib_driver.c } else if (l == hdrqtail) l 540 drivers/infiniband/hw/qib/qib_driver.c lval = l; l 547 drivers/infiniband/hw/qib/qib_driver.c rcd->head = l; l 233 drivers/infiniband/hw/qib/qib_rc.c ohdr = &priv->s_hdr->u.l.oth; l 629 drivers/infiniband/hw/qib/qib_rc.c hwords += qib_make_grh(ibp, &hdr.u.l.grh, l 632 drivers/infiniband/hw/qib/qib_rc.c ohdr = &hdr.u.l.oth; l 895 drivers/infiniband/hw/qib/qib_rc.c ohdr = &hdr->u.l.oth; l 1732 drivers/infiniband/hw/qib/qib_rc.c ohdr = &hdr->u.l.oth; l 101 drivers/infiniband/hw/qib/qib_ruc.c if (!gid_ok(&hdr->u.l.grh.dgid, l 104 drivers/infiniband/hw/qib/qib_ruc.c if (!gid_ok(&hdr->u.l.grh.sgid, l 140 drivers/infiniband/hw/qib/qib_ruc.c if (!gid_ok(&hdr->u.l.grh.dgid, l 143 drivers/infiniband/hw/qib/qib_ruc.c if (!gid_ok(&hdr->u.l.grh.sgid, l 221 drivers/infiniband/hw/qib/qib_ruc.c qib_make_grh(ibp, &priv->s_hdr->u.l.grh, l 77 drivers/infiniband/hw/qib/qib_uc.c ohdr = &priv->s_hdr->u.l.oth; l 255 drivers/infiniband/hw/qib/qib_uc.c ohdr = &hdr->u.l.oth; l 324 drivers/infiniband/hw/qib/qib_ud.c qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh, l 328 drivers/infiniband/hw/qib/qib_ud.c ohdr = &priv->s_hdr->u.l.oth; l 443 drivers/infiniband/hw/qib/qib_ud.c ohdr = &hdr->u.l.oth; l 549 drivers/infiniband/hw/qib/qib_ud.c rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh, l 305 drivers/infiniband/hw/qib/qib_verbs.c ohdr = &hdr->u.l.oth; l 306 drivers/infiniband/hw/qib/qib_verbs.c if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) l 308 drivers/infiniband/hw/qib/qib_verbs.c vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); l 328 drivers/infiniband/hw/qib/qib_verbs.c mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid); l 477 drivers/infiniband/hw/qib/qib_verbs.c u32 l = len; l 479 drivers/infiniband/hw/qib/qib_verbs.c while (l >= sizeof(u32)) { l 487 drivers/infiniband/hw/qib/qib_verbs.c l -= sizeof(u32); l 492 drivers/infiniband/hw/qib/qib_verbs.c if (l) { l 495 drivers/infiniband/hw/qib/qib_verbs.c if (l + extra >= sizeof(u32)) { l 497 drivers/infiniband/hw/qib/qib_verbs.c len -= l + extra - sizeof(u32); l 508 drivers/infiniband/hw/qib/qib_verbs.c data |= clear_upper_bytes(v, l, extra); l 513 drivers/infiniband/hw/qib/qib_verbs.c extra += l; l 571 drivers/infiniband/hw/qib/qib_verbs.c struct list_head *l = dev->txreq_free.next; l 573 drivers/infiniband/hw/qib/qib_verbs.c list_del(l); l 576 drivers/infiniband/hw/qib/qib_verbs.c tx = list_entry(l, struct qib_verbs_txreq, txreq.list); l 601 drivers/infiniband/hw/qib/qib_verbs.c struct list_head *l = dev->txreq_free.next; l 603 drivers/infiniband/hw/qib/qib_verbs.c list_del(l); l 605 drivers/infiniband/hw/qib/qib_verbs.c tx = list_entry(l, struct qib_verbs_txreq, txreq.list); l 1629 drivers/infiniband/hw/qib/qib_verbs.c struct list_head *l = dev->txreq_free.next; l 1632 drivers/infiniband/hw/qib/qib_verbs.c list_del(l); l 1633 drivers/infiniband/hw/qib/qib_verbs.c tx = list_entry(l, struct qib_verbs_txreq, txreq.list); l 1665 drivers/infiniband/hw/qib/qib_verbs.c struct list_head *l = dev->txreq_free.next; l 1668 drivers/infiniband/hw/qib/qib_verbs.c list_del(l); l 1669 drivers/infiniband/hw/qib/qib_verbs.c tx = list_entry(l, struct qib_verbs_txreq, txreq.list); l 235 
drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c u32 l, crc = 0; l 254 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c l = netdev_hw_addr_list_count(hw_list) * ETH_ALEN; l 255 drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c crc = ~crc32_le(crc, (void *)&l, sizeof(l)); l 574 drivers/input/joystick/sidewinder.c int i, j, k, l; l 638 drivers/input/joystick/sidewinder.c l = 0; l 644 drivers/input/joystick/sidewinder.c dbg("Init 3: Mode %d. Length %d. Last %d. Tries %d.", m, i, l, k); l 646 drivers/input/joystick/sidewinder.c if (i > l) { /* Longer? As we can only lose bits, it makes */ l 648 drivers/input/joystick/sidewinder.c l = i; /* than the previous one */ l 715 drivers/input/joystick/sidewinder.c l = j; l 763 drivers/input/joystick/sidewinder.c dbg("%s%s [%d-bit id %d data %d]\n", sw->name, comment, m, l, k); l 177 drivers/input/joystick/tmdc.c int i, k, l; l 207 drivers/input/joystick/tmdc.c for (k = l = 0; k < 4; k++) { l 209 drivers/input/joystick/tmdc.c input_report_key(port->dev, port->btn[i + l], l 211 drivers/input/joystick/tmdc.c l += port->btnc[k]; l 413 drivers/input/misc/hp_sdc_rtc.c unsigned long l; l 415 drivers/input/misc/hp_sdc_rtc.c l = 0; l 416 drivers/input/misc/hp_sdc_rtc.c if (l != 0) l 60 drivers/input/misc/yealink.c #define _LOC(k,l) { .a = (k), .m = (l) } l 212 drivers/input/mouse/hgpk.c int l, int r, int x, int y) l 218 drivers/input/mouse/hgpk.c if (l || r) l 139 drivers/input/serio/olpc_apsp.c unsigned long l; l 142 drivers/input/serio/olpc_apsp.c l = readl(priv->base + COMMAND_FIFO_STATUS); l 143 drivers/input/serio/olpc_apsp.c if (!(l & CMD_STS_MASK)) { l 760 drivers/interconnect/qcom/sdm845.c const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l; l 763 drivers/interconnect/qcom/sdm845.c if (l[0]->aux_data.vcd < r[0]->aux_data.vcd) l 765 drivers/interconnect/qcom/sdm845.c else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd) l 44 drivers/iommu/io-pgtable-arm.c #define ARM_LPAE_LVL_SHIFT(l,d) \ l 45 drivers/iommu/io-pgtable-arm.c ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ l 57 drivers/iommu/io-pgtable-arm.c #define ARM_LPAE_PGD_IDX(l,d) \ l 58 drivers/iommu/io-pgtable-arm.c ((l) == ARM_LPAE_START_LVL(d) ? 
ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) l 60 drivers/iommu/io-pgtable-arm.c #define ARM_LPAE_LVL_IDX(a,l,d) \ l 61 drivers/iommu/io-pgtable-arm.c (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ l 62 drivers/iommu/io-pgtable-arm.c ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) l 65 drivers/iommu/io-pgtable-arm.c #define ARM_LPAE_BLOCK_SIZE(l,d) \ l 67 drivers/iommu/io-pgtable-arm.c ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) l 175 drivers/iommu/io-pgtable-arm.c #define iopte_type(pte,l) \ l 150 drivers/iommu/omap-iommu.c u32 l = iommu_read_reg(obj, MMU_CNTL); l 157 drivers/iommu/omap-iommu.c l &= ~MMU_CNTL_MASK; l 159 drivers/iommu/omap-iommu.c l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); l 161 drivers/iommu/omap-iommu.c l |= (MMU_CNTL_MMU_EN); l 163 drivers/iommu/omap-iommu.c iommu_write_reg(obj, l, MMU_CNTL); l 168 drivers/iommu/omap-iommu.c u32 l, pa; l 177 drivers/iommu/omap-iommu.c l = iommu_read_reg(obj, MMU_REVISION); l 179 drivers/iommu/omap-iommu.c (l >> 4) & 0xf, l & 0xf); l 195 drivers/iommu/omap-iommu.c u32 l = iommu_read_reg(obj, MMU_CNTL); l 197 drivers/iommu/omap-iommu.c l &= ~MMU_CNTL_MASK; l 198 drivers/iommu/omap-iommu.c iommu_write_reg(obj, l, MMU_CNTL); l 262 drivers/iommu/omap-iommu.c void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) l 268 drivers/iommu/omap-iommu.c l->base = MMU_LOCK_BASE(val); l 269 drivers/iommu/omap-iommu.c l->vict = MMU_LOCK_VICT(val); l 272 drivers/iommu/omap-iommu.c void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) l 276 drivers/iommu/omap-iommu.c val = (l->base << MMU_LOCK_BASE_SHIFT); l 277 drivers/iommu/omap-iommu.c val |= (l->vict << MMU_LOCK_VICT_SHIFT); l 301 drivers/iommu/omap-iommu.c struct iotlb_lock l; l 303 drivers/iommu/omap-iommu.c iotlb_lock_get(obj, &l); l 304 drivers/iommu/omap-iommu.c l.vict = n; l 305 drivers/iommu/omap-iommu.c iotlb_lock_set(obj, &l); l 344 drivers/iommu/omap-iommu.c struct iotlb_lock l; l 352 drivers/iommu/omap-iommu.c iotlb_lock_get(obj, &l); l 353 drivers/iommu/omap-iommu.c if (l.base == obj->nr_tlb_entries) { l 372 drivers/iommu/omap-iommu.c iotlb_lock_get(obj, &l); l 374 drivers/iommu/omap-iommu.c l.vict = l.base; l 375 drivers/iommu/omap-iommu.c iotlb_lock_set(obj, &l); l 388 drivers/iommu/omap-iommu.c l.base++; l 390 drivers/iommu/omap-iommu.c if (++l.vict == obj->nr_tlb_entries) l 391 drivers/iommu/omap-iommu.c l.vict = l.base; l 392 drivers/iommu/omap-iommu.c iotlb_lock_set(obj, &l); l 456 drivers/iommu/omap-iommu.c struct iotlb_lock l; l 460 drivers/iommu/omap-iommu.c l.base = 0; l 461 drivers/iommu/omap-iommu.c l.vict = 0; l 462 drivers/iommu/omap-iommu.c iotlb_lock_set(obj, &l); l 932 drivers/iommu/omap-iommu.c struct iotlb_lock l; l 940 drivers/iommu/omap-iommu.c l.base = 0; l 943 drivers/iommu/omap-iommu.c l.vict = i; l 944 drivers/iommu/omap-iommu.c iotlb_lock_set(obj, &l); l 947 drivers/iommu/omap-iommu.c l.base = obj->num_cr_ctx; l 948 drivers/iommu/omap-iommu.c l.vict = i; l 949 drivers/iommu/omap-iommu.c iotlb_lock_set(obj, &l); l 236 drivers/iommu/omap-iommu.h void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l); l 237 drivers/iommu/omap-iommu.h void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l); l 114 drivers/ipack/ipack.c unsigned int i, c, l, s; l 120 drivers/ipack/ipack.c l = 0x7; s = 1; break; l 122 drivers/ipack/ipack.c l = 0xf; s = 2; break; l 129 drivers/ipack/ipack.c if ((i & l) == 0) l 322 drivers/irqchip/irq-gic-v3-its.c static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) l 324 
drivers/irqchip/irq-gic-v3-its.c u64 mask = GENMASK_ULL(h, l); l 326 drivers/irqchip/irq-gic-v3-its.c *raw_cmd |= (val << l) & mask; l 1329 drivers/isdn/capi/capi.c struct list_head *l; l 1332 drivers/isdn/capi/capi.c list_for_each(l, &capidev_list) { l 1333 drivers/isdn/capi/capi.c cdev = list_entry(l, struct capidev, list); l 107 drivers/isdn/capi/capilib.c struct list_head *l; l 110 drivers/isdn/capi/capilib.c list_for_each(l, head) { l 111 drivers/isdn/capi/capilib.c np = list_entry(l, struct capilib_ncci, list); l 128 drivers/isdn/capi/capilib.c struct list_head *l, *n; l 131 drivers/isdn/capi/capilib.c list_for_each_safe(l, n, head) { l 132 drivers/isdn/capi/capilib.c np = list_entry(l, struct capilib_ncci, list); l 145 drivers/isdn/capi/capilib.c struct list_head *l, *n; l 148 drivers/isdn/capi/capilib.c list_for_each_safe(l, n, head) { l 149 drivers/isdn/capi/capilib.c np = list_entry(l, struct capilib_ncci, list); l 160 drivers/isdn/capi/capilib.c struct list_head *l; l 163 drivers/isdn/capi/capilib.c list_for_each(l, head) { l 164 drivers/isdn/capi/capilib.c np = list_entry(l, struct capilib_ncci, list); l 183 drivers/isdn/capi/capilib.c struct list_head *l; l 186 drivers/isdn/capi/capilib.c list_for_each(l, head) { l 187 drivers/isdn/capi/capilib.c np = list_entry(l, struct capilib_ncci, list); l 194 drivers/isdn/capi/capiutil.c #define structTLcpy(x, y, l) memcpy(x, y, l) l 195 drivers/isdn/capi/capiutil.c #define structTLcpyovl(x, y, l) memmove(x, y, l) l 200 drivers/isdn/capi/capiutil.c #define structTRcpy(x, y, l) memcpy(y, x, l) l 201 drivers/isdn/capi/capiutil.c #define structTRcpyovl(x, y, l) memmove(y, x, l) l 255 drivers/isdn/capi/capiutil.c byteTLcpy(cmsg->m + cmsg->l, OFF); l 256 drivers/isdn/capi/capiutil.c cmsg->l++; l 259 drivers/isdn/capi/capiutil.c wordTLcpy(cmsg->m + cmsg->l, OFF); l 260 drivers/isdn/capi/capiutil.c cmsg->l += 2; l 263 drivers/isdn/capi/capiutil.c dwordTLcpy(cmsg->m + cmsg->l, OFF); l 264 drivers/isdn/capi/capiutil.c cmsg->l += 4; l 268 drivers/isdn/capi/capiutil.c *(cmsg->m + cmsg->l) = '\0'; l 269 drivers/isdn/capi/capiutil.c cmsg->l++; l 271 drivers/isdn/capi/capiutil.c structTLcpy(cmsg->m + cmsg->l, *(_cstruct *) OFF, 1 + **(_cstruct *) OFF); l 272 drivers/isdn/capi/capiutil.c cmsg->l += 1 + **(_cstruct *) OFF; l 275 drivers/isdn/capi/capiutil.c structTLcpy(cmsg->m + cmsg->l, s, 3 + *(u16 *) (s + 1)); l 276 drivers/isdn/capi/capiutil.c cmsg->l += 3 + *(u16 *) (s + 1); l 282 drivers/isdn/capi/capiutil.c *(cmsg->m + cmsg->l) = '\0'; l 283 drivers/isdn/capi/capiutil.c cmsg->l++; l 288 drivers/isdn/capi/capiutil.c unsigned _l = cmsg->l; l 290 drivers/isdn/capi/capiutil.c cmsg->l++; l 293 drivers/isdn/capi/capiutil.c _ls = cmsg->l - _l - 1; l 318 drivers/isdn/capi/capiutil.c cmsg->l = 8; l 326 drivers/isdn/capi/capiutil.c wordTLcpy(msg + 0, &cmsg->l); l 342 drivers/isdn/capi/capiutil.c byteTRcpy(cmsg->m + cmsg->l, OFF); l 343 drivers/isdn/capi/capiutil.c cmsg->l++; l 346 drivers/isdn/capi/capiutil.c wordTRcpy(cmsg->m + cmsg->l, OFF); l 347 drivers/isdn/capi/capiutil.c cmsg->l += 2; l 350 drivers/isdn/capi/capiutil.c dwordTRcpy(cmsg->m + cmsg->l, OFF); l 351 drivers/isdn/capi/capiutil.c cmsg->l += 4; l 354 drivers/isdn/capi/capiutil.c *(u8 **) OFF = cmsg->m + cmsg->l; l 356 drivers/isdn/capi/capiutil.c if (cmsg->m[cmsg->l] != 0xff) l 357 drivers/isdn/capi/capiutil.c cmsg->l += 1 + cmsg->m[cmsg->l]; l 359 drivers/isdn/capi/capiutil.c cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1); l 363 drivers/isdn/capi/capiutil.c if (cmsg->m[cmsg->l] == '\0') { 
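The its_mask_encode() fragment from drivers/irqchip/irq-gic-v3-its.c above packs a value into the bit range l..h of a 64-bit ITS command word via GENMASK_ULL(). A minimal standalone sketch of that pattern, with the mask macro open-coded and an illustrative main(); the intermediate "*raw_cmd &= ~mask" clearing step is an assumption, since the listing only shows the mask computation and the OR:

#include <stdint.h>
#include <stdio.h>

/* Open-coded GENMASK_ULL(h, l): a 64-bit mask covering bits l..h inclusive. */
static uint64_t genmask_ull(int h, int l)
{
	return (~0ULL >> (63 - h)) & ~((1ULL << l) - 1);
}

/* Insert val into bits l..h of *raw_cmd, in the style of its_mask_encode(). */
static void mask_encode(uint64_t *raw_cmd, uint64_t val, int h, int l)
{
	uint64_t mask = genmask_ull(h, l);

	*raw_cmd &= ~mask;		/* assumed clearing step, not shown in the listing */
	*raw_cmd |= (val << l) & mask;
}

int main(void)
{
	uint64_t cmd = 0;

	mask_encode(&cmd, 0x2a, 15, 8);	/* place 0x2a into bits 15:8 */
	printf("cmd = %#llx\n", (unsigned long long)cmd);	/* prints 0x2a00 */
	return 0;
}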
l 365 drivers/isdn/capi/capiutil.c cmsg->l++; l 368 drivers/isdn/capi/capiutil.c unsigned _l = cmsg->l; l 370 drivers/isdn/capi/capiutil.c cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1; l 391 drivers/isdn/capi/capiutil.c cmsg->l = 8; l 401 drivers/isdn/capi/capiutil.c wordTRcpy(msg + 0, &cmsg->l); l 667 drivers/isdn/capi/capiutil.c cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u8 *) (cmsg->m + cmsg->l)); l 668 drivers/isdn/capi/capiutil.c cmsg->l++; l 671 drivers/isdn/capi/capiutil.c cdb = bufprint(cdb, "%-*s = 0x%x\n", slen, NAME, *(u16 *) (cmsg->m + cmsg->l)); l 672 drivers/isdn/capi/capiutil.c cmsg->l += 2; l 675 drivers/isdn/capi/capiutil.c cdb = bufprint(cdb, "%-*s = 0x%lx\n", slen, NAME, *(u32 *) (cmsg->m + cmsg->l)); l 676 drivers/isdn/capi/capiutil.c cmsg->l += 4; l 680 drivers/isdn/capi/capiutil.c if (cmsg->m[cmsg->l] == '\0') l 683 drivers/isdn/capi/capiutil.c cdb = printstruct(cdb, cmsg->m + cmsg->l); l 685 drivers/isdn/capi/capiutil.c if (cmsg->m[cmsg->l] != 0xff) l 686 drivers/isdn/capi/capiutil.c cmsg->l += 1 + cmsg->m[cmsg->l]; l 688 drivers/isdn/capi/capiutil.c cmsg->l += 3 + *(u16 *) (cmsg->m + cmsg->l + 1); l 694 drivers/isdn/capi/capiutil.c if (cmsg->m[cmsg->l] == '\0') { l 696 drivers/isdn/capi/capiutil.c cmsg->l++; l 700 drivers/isdn/capi/capiutil.c unsigned _l = cmsg->l; l 702 drivers/isdn/capi/capiutil.c cmsg->l = (cmsg->m + _l)[0] == 255 ? cmsg->l + 3 : cmsg->l + 1; l 785 drivers/isdn/capi/capiutil.c cmsg->l = 8; l 823 drivers/isdn/capi/capiutil.c cmsg->l = 8; l 1025 drivers/isdn/capi/kcapi.c struct list_head *l; l 1050 drivers/isdn/capi/kcapi.c list_for_each(l, &capi_drivers) { l 1051 drivers/isdn/capi/kcapi.c driver = list_entry(l, struct capi_driver, list); l 1057 drivers/isdn/capi/kcapi.c list_for_each(l, &capi_drivers) { l 1058 drivers/isdn/capi/kcapi.c driver = list_entry(l, struct capi_driver, list); l 1227 drivers/isdn/capi/kcapi.c struct list_head *l; l 1244 drivers/isdn/capi/kcapi.c list_for_each(l, &capi_drivers) { l 1245 drivers/isdn/capi/kcapi.c driver = list_entry(l, struct capi_driver, list); l 70 drivers/isdn/hardware/mISDN/mISDNisar.c int l = 0; l 72 drivers/isdn/hardware/mISDN/mISDNisar.c while (l < (int)len) { l 73 drivers/isdn/hardware/mISDN/mISDNisar.c hex_dump_to_buffer(msg + l, len - l, 32, 1, l 76 drivers/isdn/hardware/mISDN/mISDNisar.c __func__, l, isar->log); l 77 drivers/isdn/hardware/mISDN/mISDNisar.c l += 32; l 99 drivers/isdn/hardware/mISDN/mISDNisar.c int l = 0; l 101 drivers/isdn/hardware/mISDN/mISDNisar.c while (l < (int)isar->clsb) { l 102 drivers/isdn/hardware/mISDN/mISDNisar.c hex_dump_to_buffer(msg + l, isar->clsb - l, 32, l 105 drivers/isdn/hardware/mISDN/mISDNisar.c __func__, l, isar->log); l 106 drivers/isdn/hardware/mISDN/mISDNisar.c l += 32; l 871 drivers/isdn/mISDN/l1oip_core.c int l, ll; l 888 drivers/isdn/mISDN/l1oip_core.c l = skb->len; l 889 drivers/isdn/mISDN/l1oip_core.c while (l) { l 894 drivers/isdn/mISDN/l1oip_core.c ll = (l < MAX_DFRAME_LEN_L1) ? 
l : MAX_DFRAME_LEN_L1; l 898 drivers/isdn/mISDN/l1oip_core.c l -= ll; l 1090 drivers/isdn/mISDN/l1oip_core.c int l, ll; l 1106 drivers/isdn/mISDN/l1oip_core.c l = skb->len; l 1107 drivers/isdn/mISDN/l1oip_core.c if (!memchr_inv(skb->data, 0xff, l)) { l 1111 drivers/isdn/mISDN/l1oip_core.c hc->chan[bch->slot].tx_counter += l; l 1117 drivers/isdn/mISDN/l1oip_core.c l = skb->len; l 1118 drivers/isdn/mISDN/l1oip_core.c if (!memchr_inv(skb->data, 0x2a, l)) { l 1122 drivers/isdn/mISDN/l1oip_core.c hc->chan[bch->slot].tx_counter += l; l 1130 drivers/isdn/mISDN/l1oip_core.c l = skb->len; l 1131 drivers/isdn/mISDN/l1oip_core.c while (l) { l 1136 drivers/isdn/mISDN/l1oip_core.c ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1; l 1141 drivers/isdn/mISDN/l1oip_core.c l -= ll; l 1861 drivers/isdn/mISDN/layer2.c u_int l; l 1864 drivers/isdn/mISDN/layer2.c l = l2addrsize(l2); l 1865 drivers/isdn/mISDN/layer2.c if (skb->len <= l) { l 1898 drivers/isdn/mISDN/layer2.c datap += l; l 46 drivers/isdn/mISDN/socket.c mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk) l 48 drivers/isdn/mISDN/socket.c write_lock_bh(&l->lock); l 49 drivers/isdn/mISDN/socket.c sk_add_node(sk, &l->head); l 50 drivers/isdn/mISDN/socket.c write_unlock_bh(&l->lock); l 53 drivers/isdn/mISDN/socket.c static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk) l 55 drivers/isdn/mISDN/socket.c write_lock_bh(&l->lock); l 57 drivers/isdn/mISDN/socket.c write_unlock_bh(&l->lock); l 785 drivers/leds/leds-tca6507.c struct tca6507_led *l = tca->leds + i; l 787 drivers/leds/leds-tca6507.c l->chip = tca; l 788 drivers/leds/leds-tca6507.c l->num = i; l 790 drivers/leds/leds-tca6507.c l->led_cdev.name = pdata->leds.leds[i].name; l 791 drivers/leds/leds-tca6507.c l->led_cdev.default_trigger l 793 drivers/leds/leds-tca6507.c l->led_cdev.brightness_set = tca6507_brightness_set; l 794 drivers/leds/leds-tca6507.c l->led_cdev.blink_set = tca6507_blink_set; l 795 drivers/leds/leds-tca6507.c l->bank = -1; l 797 drivers/leds/leds-tca6507.c &l->led_cdev); l 21 drivers/macintosh/macio_sysfs.c int l; l 24 drivers/macintosh/macio_sysfs.c l = strlen (compat) + 1; l 25 drivers/macintosh/macio_sysfs.c compat += l; l 26 drivers/macintosh/macio_sysfs.c cplen -= l; l 2544 drivers/macintosh/via-pmu.c int i, l, c; l 2548 drivers/macintosh/via-pmu.c l = pmu_data_len[c][0]; l 2549 drivers/macintosh/via-pmu.c if (l >= 0 && req->nbytes != l + 1) l 2559 drivers/macintosh/via-pmu.c if (l < 0) { l 2560 drivers/macintosh/via-pmu.c l = req->nbytes - 1; l 2561 drivers/macintosh/via-pmu.c polled_send_byte(l); l 2563 drivers/macintosh/via-pmu.c for (i = 1; i <= l; ++i) l 2566 drivers/macintosh/via-pmu.c l = pmu_data_len[c][1]; l 2567 drivers/macintosh/via-pmu.c if (l < 0) l 2568 drivers/macintosh/via-pmu.c l = polled_recv_byte(); l 2569 drivers/macintosh/via-pmu.c for (i = 0; i < l; ++i) l 162 drivers/macintosh/windfarm_smu_controls.c const char *l; l 168 drivers/macintosh/windfarm_smu_controls.c l = of_get_property(node, "location", NULL); l 169 drivers/macintosh/windfarm_smu_controls.c if (l == NULL) l 187 drivers/macintosh/windfarm_smu_controls.c if (!strcmp(l, "Rear Fan 0") || !strcmp(l, "Rear Fan") || l 188 drivers/macintosh/windfarm_smu_controls.c !strcmp(l, "Rear fan 0") || !strcmp(l, "Rear fan") || l 189 drivers/macintosh/windfarm_smu_controls.c !strcmp(l, "CPU A EXHAUST")) l 191 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "Rear Fan 1") || !strcmp(l, "Rear fan 1") || l 192 drivers/macintosh/windfarm_smu_controls.c !strcmp(l, "CPU 
B EXHAUST")) l 194 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "Front Fan 0") || !strcmp(l, "Front Fan") || l 195 drivers/macintosh/windfarm_smu_controls.c !strcmp(l, "Front fan 0") || !strcmp(l, "Front fan") || l 196 drivers/macintosh/windfarm_smu_controls.c !strcmp(l, "CPU A INTAKE")) l 198 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "Front Fan 1") || !strcmp(l, "Front fan 1") || l 199 drivers/macintosh/windfarm_smu_controls.c !strcmp(l, "CPU B INTAKE")) l 201 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "CPU A PUMP")) l 203 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "CPU B PUMP")) l 205 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || l 206 drivers/macintosh/windfarm_smu_controls.c !strcmp(l, "EXPANSION SLOTS INTAKE")) l 208 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "Drive Bay") || !strcmp(l, "Drive bay") || l 209 drivers/macintosh/windfarm_smu_controls.c !strcmp(l, "DRIVE BAY A INTAKE")) l 211 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "BACKSIDE")) l 215 drivers/macintosh/windfarm_smu_controls.c if (!strcmp(l, "System Fan") || !strcmp(l, "System fan")) l 217 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "CPU Fan") || !strcmp(l, "CPU fan")) l 219 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "Hard Drive") || !strcmp(l, "Hard drive")) l 221 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "HDD Fan")) /* seen on iMac G5 iSight */ l 223 drivers/macintosh/windfarm_smu_controls.c else if (!strcmp(l, "ODD Fan")) /* same */ l 199 drivers/macintosh/windfarm_smu_sensors.c const char *l; l 205 drivers/macintosh/windfarm_smu_sensors.c l = of_get_property(node, "location", NULL); l 206 drivers/macintosh/windfarm_smu_sensors.c if (l == NULL) l 217 drivers/macintosh/windfarm_smu_sensors.c !strcmp(l, "CPU T-Diode")) { l 226 drivers/macintosh/windfarm_smu_sensors.c !strcmp(l, "CPU Current")) { l 235 drivers/macintosh/windfarm_smu_sensors.c !strcmp(l, "CPU Voltage")) { l 244 drivers/macintosh/windfarm_smu_sensors.c !strcmp(l, "Slots Power")) { l 168 drivers/mailbox/mailbox-test.c int l = 0; l 208 drivers/mailbox/mailbox-test.c while (l < MBOX_HEXDUMP_MAX_LEN) { l 211 drivers/mailbox/mailbox-test.c MBOX_BYTES_PER_LINE, 1, touser + l, l 215 drivers/mailbox/mailbox-test.c l += MBOX_HEXDUMP_LINE_LEN; l 216 drivers/mailbox/mailbox-test.c *(touser + (l - 1)) = '\n'; l 218 drivers/mailbox/mailbox-test.c *(touser + l) = '\0'; l 202 drivers/mailbox/omap-mailbox.c u32 l; l 208 drivers/mailbox/omap-mailbox.c l = mbox_read_reg(mbox->parent, irqenable); l 209 drivers/mailbox/omap-mailbox.c l |= bit; l 210 drivers/mailbox/omap-mailbox.c mbox_write_reg(mbox->parent, l, irqenable); l 711 drivers/mailbox/omap-mailbox.c u32 l; l 867 drivers/mailbox/omap-mailbox.c l = mbox_read_reg(mdev, MAILBOX_REVISION); l 868 drivers/mailbox/omap-mailbox.c dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l); l 177 drivers/md/bcache/alloc.c #define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r)) l 178 drivers/md/bcache/alloc.c #define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r)) l 131 drivers/md/bcache/bset.c int __bch_keylist_realloc(struct keylist *l, unsigned int u64s) l 133 drivers/md/bcache/bset.c size_t oldsize = bch_keylist_nkeys(l); l 135 drivers/md/bcache/bset.c uint64_t *old_keys = l->keys_p == l->inline_keys ? 
NULL : l->keys_p; l 150 drivers/md/bcache/bset.c memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize); l 152 drivers/md/bcache/bset.c l->keys_p = new_keys; l 153 drivers/md/bcache/bset.c l->top_p = new_keys + oldsize; l 158 drivers/md/bcache/bset.c struct bkey *bch_keylist_pop(struct keylist *l) l 160 drivers/md/bcache/bset.c struct bkey *k = l->keys; l 162 drivers/md/bcache/bset.c if (k == l->top) l 165 drivers/md/bcache/bset.c while (bkey_next(k) != l->top) l 168 drivers/md/bcache/bset.c return l->top = k; l 171 drivers/md/bcache/bset.c void bch_keylist_pop_front(struct keylist *l) l 173 drivers/md/bcache/bset.c l->top_p -= bkey_u64s(l->keys); l 175 drivers/md/bcache/bset.c memmove(l->keys, l 176 drivers/md/bcache/bset.c bkey_next(l->keys), l 177 drivers/md/bcache/bset.c bch_keylist_bytes(l)); l 599 drivers/md/bcache/bset.c struct bkey *l = is_power_of_2(j) l 607 drivers/md/bcache/bset.c BUG_ON(m < l || m > r); l 620 drivers/md/bcache/bset.c if (KEY_INODE(l) != KEY_INODE(r)) l 621 drivers/md/bcache/bset.c f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64; l 623 drivers/md/bcache/bset.c f->exponent = fls64(r->low ^ l->low); l 842 drivers/md/bcache/bset.c bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r) l 852 drivers/md/bcache/bset.c if (!bch_bkey_equal_header(l, r) || l 853 drivers/md/bcache/bset.c bkey_cmp(l, &START_KEY(r))) l 856 drivers/md/bcache/bset.c return b->ops->key_merge(b, l, r); l 939 drivers/md/bcache/bset.c struct bkey *l, *r; l 965 drivers/md/bcache/bset.c struct bkey *l, *r; l 998 drivers/md/bcache/bset.c l = cacheline_to_bkey(t, inorder, f->m); l 1010 drivers/md/bcache/bset.c l = cacheline_to_bkey(t, inorder, f->m); l 1012 drivers/md/bcache/bset.c l = t->data->start; l 1015 drivers/md/bcache/bset.c return (struct bset_search_iter) {l, r}; l 1039 drivers/md/bcache/bset.c i.l = t->data->start; l 1065 drivers/md/bcache/bset.c i.l != t->data->start && l 1067 drivers/md/bcache/bset.c inorder_to_tree(bkey_to_cacheline(t, i.l), t)), l 1074 drivers/md/bcache/bset.c while (likely(i.l != i.r) && l 1075 drivers/md/bcache/bset.c bkey_cmp(i.l, search) <= 0) l 1076 drivers/md/bcache/bset.c i.l = bkey_next(i.l); l 1078 drivers/md/bcache/bset.c return i.l; l 1087 drivers/md/bcache/bset.c static inline bool btree_iter_cmp(struct btree_iter_set l, l 1090 drivers/md/bcache/bset.c return bkey_cmp(l.k, r.k) > 0; l 190 drivers/md/bcache/bset.h bool (*sort_cmp)(struct btree_iter_set l, l 203 drivers/md/bcache/bset.h struct bkey *l, struct bkey *r); l 301 drivers/md/bcache/bset.h bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r); l 413 drivers/md/bcache/bset.h static __always_inline int64_t bkey_cmp(const struct bkey *l, l 416 drivers/md/bcache/bset.h return unlikely(KEY_INODE(l) != KEY_INODE(r)) l 417 drivers/md/bcache/bset.h ? 
(int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r) l 418 drivers/md/bcache/bset.h : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r); l 475 drivers/md/bcache/bset.h static inline bool bch_bkey_equal_header(const struct bkey *l, l 478 drivers/md/bcache/bset.h return (KEY_DIRTY(l) == KEY_DIRTY(r) && l 479 drivers/md/bcache/bset.h KEY_PTRS(l) == KEY_PTRS(r) && l 480 drivers/md/bcache/bset.h KEY_CSUM(l) == KEY_CSUM(r)); l 500 drivers/md/bcache/bset.h static inline void bch_keylist_init(struct keylist *l) l 502 drivers/md/bcache/bset.h l->top_p = l->keys_p = l->inline_keys; l 505 drivers/md/bcache/bset.h static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k) l 507 drivers/md/bcache/bset.h l->keys = k; l 508 drivers/md/bcache/bset.h l->top = bkey_next(k); l 511 drivers/md/bcache/bset.h static inline void bch_keylist_push(struct keylist *l) l 513 drivers/md/bcache/bset.h l->top = bkey_next(l->top); l 516 drivers/md/bcache/bset.h static inline void bch_keylist_add(struct keylist *l, struct bkey *k) l 518 drivers/md/bcache/bset.h bkey_copy(l->top, k); l 519 drivers/md/bcache/bset.h bch_keylist_push(l); l 522 drivers/md/bcache/bset.h static inline bool bch_keylist_empty(struct keylist *l) l 524 drivers/md/bcache/bset.h return l->top == l->keys; l 527 drivers/md/bcache/bset.h static inline void bch_keylist_reset(struct keylist *l) l 529 drivers/md/bcache/bset.h l->top = l->keys; l 532 drivers/md/bcache/bset.h static inline void bch_keylist_free(struct keylist *l) l 534 drivers/md/bcache/bset.h if (l->keys_p != l->inline_keys) l 535 drivers/md/bcache/bset.h kfree(l->keys_p); l 538 drivers/md/bcache/bset.h static inline size_t bch_keylist_nkeys(struct keylist *l) l 540 drivers/md/bcache/bset.h return l->top_p - l->keys_p; l 543 drivers/md/bcache/bset.h static inline size_t bch_keylist_bytes(struct keylist *l) l 545 drivers/md/bcache/bset.h return bch_keylist_nkeys(l) * sizeof(uint64_t); l 548 drivers/md/bcache/bset.h struct bkey *bch_keylist_pop(struct keylist *l); l 549 drivers/md/bcache/bset.h void bch_keylist_pop_front(struct keylist *l); l 550 drivers/md/bcache/bset.h int __bch_keylist_realloc(struct keylist *l, unsigned int u64s); l 124 drivers/md/bcache/btree.c int _r, l = (b)->level - 1; \ l 125 drivers/md/bcache/btree.c bool _w = l <= (op)->lock; \ l 126 drivers/md/bcache/btree.c struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \ l 2455 drivers/md/bcache/btree.c static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) l 2458 drivers/md/bcache/btree.c if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0) l 2460 drivers/md/bcache/btree.c if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0) l 2465 drivers/md/bcache/btree.c static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, l 2468 drivers/md/bcache/btree.c return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1); l 39 drivers/md/bcache/extents.c static bool bch_key_sort_cmp(struct btree_iter_set l, l 42 drivers/md/bcache/extents.c int64_t c = bkey_cmp(l.k, r.k); l 44 drivers/md/bcache/extents.c return c ? c > 0 : l.k < r.k; l 258 drivers/md/bcache/extents.c static bool bch_extent_sort_cmp(struct btree_iter_set l, l 261 drivers/md/bcache/extents.c int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k)); l 263 drivers/md/bcache/extents.c return c ? 
c > 0 : l.k < r.k; l 575 drivers/md/bcache/extents.c static uint64_t merge_chksums(struct bkey *l, struct bkey *r) l 577 drivers/md/bcache/extents.c return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) & l 582 drivers/md/bcache/extents.c struct bkey *l, l 591 drivers/md/bcache/extents.c for (i = 0; i < KEY_PTRS(l); i++) l 592 drivers/md/bcache/extents.c if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] || l 593 drivers/md/bcache/extents.c PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) l 599 drivers/md/bcache/extents.c if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) { l 600 drivers/md/bcache/extents.c SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l)); l 601 drivers/md/bcache/extents.c SET_KEY_SIZE(l, USHRT_MAX); l 603 drivers/md/bcache/extents.c bch_cut_front(l, r); l 607 drivers/md/bcache/extents.c if (KEY_CSUM(l)) { l 609 drivers/md/bcache/extents.c l->ptr[KEY_PTRS(l)] = merge_chksums(l, r); l 611 drivers/md/bcache/extents.c SET_KEY_CSUM(l, 0); l 614 drivers/md/bcache/extents.c SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r)); l 615 drivers/md/bcache/extents.c SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r)); l 189 drivers/md/bcache/journal.c unsigned int i, l, r, m; l 205 drivers/md/bcache/journal.c l = (i * 2654435769U) % ca->sb.njournal_buckets; l 207 drivers/md/bcache/journal.c if (test_bit(l, bitmap)) l 210 drivers/md/bcache/journal.c if (read_bucket(l)) l 220 drivers/md/bcache/journal.c for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); l 221 drivers/md/bcache/journal.c l < ca->sb.njournal_buckets; l 222 drivers/md/bcache/journal.c l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l 223 drivers/md/bcache/journal.c l + 1)) l 224 drivers/md/bcache/journal.c if (read_bucket(l)) l 228 drivers/md/bcache/journal.c if (l == ca->sb.njournal_buckets) l 234 drivers/md/bcache/journal.c m = l; l 235 drivers/md/bcache/journal.c r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1); l 236 drivers/md/bcache/journal.c pr_debug("starting binary search, l %u r %u", l, r); l 238 drivers/md/bcache/journal.c while (l + 1 < r) { l 242 drivers/md/bcache/journal.c m = (l + r) >> 1; l 247 drivers/md/bcache/journal.c l = m; l 258 drivers/md/bcache/journal.c l = m; l 261 drivers/md/bcache/journal.c if (!l--) l 262 drivers/md/bcache/journal.c l = ca->sb.njournal_buckets - 1; l 264 drivers/md/bcache/journal.c if (l == m) l 267 drivers/md/bcache/journal.c if (test_bit(l, bitmap)) l 270 drivers/md/bcache/journal.c if (!read_bucket(l)) l 161 drivers/md/bcache/journal.h #define journal_pin_cmp(c, l, r) \ l 162 drivers/md/bcache/journal.h (fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r))) l 185 drivers/md/bcache/movinggc.c static bool bucket_cmp(struct bucket *l, struct bucket *r) l 187 drivers/md/bcache/movinggc.c return GC_SECTORS_USED(l) < GC_SECTORS_USED(r); l 102 drivers/md/bcache/request.c static int bch_keylist_realloc(struct keylist *l, unsigned int u64s, l 105 drivers/md/bcache/request.c size_t oldsize = bch_keylist_nkeys(l); l 117 drivers/md/bcache/request.c return __bch_keylist_realloc(l, u64s); l 1854 drivers/md/bcache/super.c struct journal_replay *l; l 2031 drivers/md/bcache/super.c l = list_first_entry(&journal, struct journal_replay, list); l 2032 drivers/md/bcache/super.c list_del(&l->list); l 2033 drivers/md/bcache/super.c kfree(l); l 987 drivers/md/bcache/sysfs.c static int __bch_cache_cmp(const void *l, const void *r) l 990 drivers/md/bcache/sysfs.c return *((uint16_t *)r) - *((uint16_t *)l); l 219 drivers/md/bcache/util.h #define fifo_swap(l, r) 
\ l 221 drivers/md/bcache/util.h swap((l)->front, (r)->front); \ l 222 drivers/md/bcache/util.h swap((l)->back, (r)->back); \ l 223 drivers/md/bcache/util.h swap((l)->size, (r)->size); \ l 224 drivers/md/bcache/util.h swap((l)->mask, (r)->mask); \ l 225 drivers/md/bcache/util.h swap((l)->data, (r)->data); \ l 1555 drivers/md/dm-bufio.c int l; l 1562 drivers/md/dm-bufio.c for (l = 0; l < LIST_SIZE; l++) { l 1563 drivers/md/dm-bufio.c list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { l 116 drivers/md/dm-cache-policy-smq.c static void l_init(struct ilist *l) l 118 drivers/md/dm-cache-policy-smq.c l->nr_elts = 0; l 119 drivers/md/dm-cache-policy-smq.c l->head = l->tail = INDEXER_NULL; l 122 drivers/md/dm-cache-policy-smq.c static struct entry *l_head(struct entry_space *es, struct ilist *l) l 124 drivers/md/dm-cache-policy-smq.c return to_entry(es, l->head); l 127 drivers/md/dm-cache-policy-smq.c static struct entry *l_tail(struct entry_space *es, struct ilist *l) l 129 drivers/md/dm-cache-policy-smq.c return to_entry(es, l->tail); l 142 drivers/md/dm-cache-policy-smq.c static bool l_empty(struct ilist *l) l 144 drivers/md/dm-cache-policy-smq.c return l->head == INDEXER_NULL; l 147 drivers/md/dm-cache-policy-smq.c static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e) l 149 drivers/md/dm-cache-policy-smq.c struct entry *head = l_head(es, l); l 151 drivers/md/dm-cache-policy-smq.c e->next = l->head; l 155 drivers/md/dm-cache-policy-smq.c head->prev = l->head = to_index(es, e); l 157 drivers/md/dm-cache-policy-smq.c l->head = l->tail = to_index(es, e); l 160 drivers/md/dm-cache-policy-smq.c l->nr_elts++; l 163 drivers/md/dm-cache-policy-smq.c static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e) l 165 drivers/md/dm-cache-policy-smq.c struct entry *tail = l_tail(es, l); l 168 drivers/md/dm-cache-policy-smq.c e->prev = l->tail; l 171 drivers/md/dm-cache-policy-smq.c tail->next = l->tail = to_index(es, e); l 173 drivers/md/dm-cache-policy-smq.c l->head = l->tail = to_index(es, e); l 176 drivers/md/dm-cache-policy-smq.c l->nr_elts++; l 179 drivers/md/dm-cache-policy-smq.c static void l_add_before(struct entry_space *es, struct ilist *l, l 185 drivers/md/dm-cache-policy-smq.c l_add_head(es, l, e); l 193 drivers/md/dm-cache-policy-smq.c l->nr_elts++; l 197 drivers/md/dm-cache-policy-smq.c static void l_del(struct entry_space *es, struct ilist *l, struct entry *e) l 205 drivers/md/dm-cache-policy-smq.c l->head = e->next; l 210 drivers/md/dm-cache-policy-smq.c l->tail = e->prev; l 213 drivers/md/dm-cache-policy-smq.c l->nr_elts--; l 216 drivers/md/dm-cache-policy-smq.c static struct entry *l_pop_head(struct entry_space *es, struct ilist *l) l 220 drivers/md/dm-cache-policy-smq.c for (e = l_head(es, l); e; e = l_next(es, e)) l 222 drivers/md/dm-cache-policy-smq.c l_del(es, l, e); l 229 drivers/md/dm-cache-policy-smq.c static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l) l 233 drivers/md/dm-cache-policy-smq.c for (e = l_tail(es, l); e; e = l_prev(es, e)) l 235 drivers/md/dm-cache-policy-smq.c l_del(es, l, e); l 430 drivers/md/dm-cache-policy-smq.c struct ilist *l, *l_above; l 436 drivers/md/dm-cache-policy-smq.c l = q->qs + level; l 442 drivers/md/dm-cache-policy-smq.c while (l->nr_elts < target) { l 450 drivers/md/dm-cache-policy-smq.c l_add_tail(q->es, l, e); l 457 drivers/md/dm-cache-policy-smq.c while (l->nr_elts > target) { l 458 drivers/md/dm-cache-policy-smq.c e = l_pop_tail(q->es, l); l 2176 
drivers/md/dm-integrity.c unsigned k, l, next_loop; l 2241 drivers/md/dm-integrity.c for (l = j; l < k; l++) { l 2242 drivers/md/dm-integrity.c remove_journal_node(ic, §ion_node[l]); l 2248 drivers/md/dm-integrity.c for (l = j; l < k; l++) { l 2250 drivers/md/dm-integrity.c struct journal_entry *je2 = access_journal_entry(ic, i, l); l 2259 drivers/md/dm-integrity.c integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), l 2260 drivers/md/dm-integrity.c (char *)access_journal_data(ic, i, l), test_tag); l 1157 drivers/md/dm-ioctl.c size_t l; l 1188 drivers/md/dm-ioctl.c l = strlen(outptr) + 1; l 1189 drivers/md/dm-ioctl.c if (l == remaining) { l 1194 drivers/md/dm-ioctl.c outptr += l; l 263 drivers/md/dm-log.c static inline void log_set_bit(struct log_c *l, l 267 drivers/md/dm-log.c l->touched_cleaned = 1; l 270 drivers/md/dm-log.c static inline void log_clear_bit(struct log_c *l, l 274 drivers/md/dm-log.c l->touched_dirtied = 1; l 254 drivers/md/dm-raid.c static void rs_config_backup(struct raid_set *rs, struct rs_layout *l) l 258 drivers/md/dm-raid.c l->new_level = mddev->new_level; l 259 drivers/md/dm-raid.c l->new_layout = mddev->new_layout; l 260 drivers/md/dm-raid.c l->new_chunk_sectors = mddev->new_chunk_sectors; l 263 drivers/md/dm-raid.c static void rs_config_restore(struct raid_set *rs, struct rs_layout *l) l 267 drivers/md/dm-raid.c mddev->new_level = l->new_level; l 268 drivers/md/dm-raid.c mddev->new_layout = l->new_layout; l 269 drivers/md/dm-raid.c mddev->new_chunk_sectors = l->new_chunk_sectors; l 531 drivers/md/dm-snap.c struct dm_snapshot *l; l 534 drivers/md/dm-snap.c list_for_each_entry(l, &o->snapshots, list) l 535 drivers/md/dm-snap.c if (l->store->chunk_size < s->store->chunk_size) l 537 drivers/md/dm-snap.c list_add_tail(&s->list, &l->list); l 759 drivers/md/dm-snap.c struct hlist_bl_head *l; l 763 drivers/md/dm-snap.c l = &eh->table[exception_hash(eh, new_e->old_chunk)]; l 770 drivers/md/dm-snap.c hlist_bl_for_each_entry(e, pos, l, hash_list) { l 801 drivers/md/dm-snap.c hlist_bl_add_head(&new_e->hash_list, l); l 243 drivers/md/dm-stats.c struct list_head *l; l 363 drivers/md/dm-stats.c list_for_each(l, &stats->list) { l 364 drivers/md/dm-stats.c tmp_s = container_of(l, struct dm_stat, list_entry); l 378 drivers/md/dm-stats.c list_add_tail_rcu(&s->list_entry, l); l 98 drivers/md/dm-table.c unsigned int l, unsigned int n) l 100 drivers/md/dm-table.c return t->index[l] + (n * KEYS_PER_NODE); l 107 drivers/md/dm-table.c static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) l 109 drivers/md/dm-table.c for (; l < t->depth - 1; l++) l 112 drivers/md/dm-table.c if (n >= t->counts[l]) l 115 drivers/md/dm-table.c return get_node(t, l, n)[KEYS_PER_NODE - 1]; l 122 drivers/md/dm-table.c static int setup_btree_index(unsigned int l, struct dm_table *t) l 127 drivers/md/dm-table.c for (n = 0U; n < t->counts[l]; n++) { l 128 drivers/md/dm-table.c node = get_node(t, l, n); l 131 drivers/md/dm-table.c node[k] = high(t, l + 1, get_child(n, k)); l 265 drivers/md/dm-table.c static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) l 269 drivers/md/dm-table.c list_for_each_entry (dd, l, list) l 1365 drivers/md/dm-table.c unsigned int l, n = 0, k = 0; l 1371 drivers/md/dm-table.c for (l = 0; l < t->depth; l++) { l 1373 drivers/md/dm-table.c node = get_node(t, l, n); l 781 drivers/md/dm.c static struct table_device *find_table_device(struct list_head *l, dev_t dev, l 786 drivers/md/dm.c list_for_each_entry(td, l, list) l 7942 
drivers/md/md.c loff_t l = *pos; l 7945 drivers/md/md.c if (l >= 0x10000) l 7947 drivers/md/md.c if (!l--) l 7953 drivers/md/md.c if (!l--) { l 7960 drivers/md/md.c if (!l--) l 200 drivers/md/persistent-data/dm-btree-remove.c struct child *l, struct child *r) l 202 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *left = l->n; l 270 drivers/md/persistent-data/dm-btree-remove.c struct child *l, struct child *c, struct child *r, l 294 drivers/md/persistent-data/dm-btree-remove.c __rebalance2(info, parent, l, r); l 301 drivers/md/persistent-data/dm-btree-remove.c struct child *l, struct child *c, struct child *r, l 348 drivers/md/persistent-data/dm-btree-remove.c struct child *l, struct child *c, struct child *r) l 350 drivers/md/persistent-data/dm-btree-remove.c struct btree_node *left = l->n; l 364 drivers/md/persistent-data/dm-btree-remove.c delete_center_node(info, parent, l, c, r, left, center, right, l 367 drivers/md/persistent-data/dm-btree-remove.c redistribute3(info, parent, l, c, r, left, center, right, l 5065 drivers/md/raid5.c struct list_head *l = conf->delayed_list.next; l 5067 drivers/md/raid5.c sh = list_entry(l, struct stripe_head, lru); l 5068 drivers/md/raid5.c list_del_init(l); l 364 drivers/media/common/saa7146/saa7146_hlp.c int l = 0, r = 0, t = 0, b = 0; l 387 drivers/media/common/saa7146/saa7146_hlp.c l = x[i]; l 393 drivers/media/common/saa7146/saa7146_hlp.c pixel_list[ 2*i ] = min_t(int, l, width); l 1884 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c int l = tpg->vflip ? 15 - line : line; \ l 1886 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c ((y * step + l) / (vdiv * div)) * tpg->bytesperline[p] + \ l 267 drivers/media/dvb-core/dvb_net.c int total_ext_len = 0, l; l 271 drivers/media/dvb-core/dvb_net.c l = handle_one_ule_extension( p ); l 272 drivers/media/dvb-core/dvb_net.c if (l < 0) l 273 drivers/media/dvb-core/dvb_net.c return l; /* Stop extension header processing and discard SNDU. 
*/ l 274 drivers/media/dvb-core/dvb_net.c total_ext_len += l; l 277 drivers/media/dvb-core/dvb_net.c l, total_ext_len); l 709 drivers/media/dvb-core/dvb_net.c int l = handle_ule_extensions(h->priv); l 711 drivers/media/dvb-core/dvb_net.c if (l < 0) { l 721 drivers/media/dvb-core/dvb_net.c skb_pull(h->priv->ule_skb, l); l 71 drivers/media/dvb-frontends/bcm3510.c #define dbufout(b,l,m) {\ l 73 drivers/media/dvb-frontends/bcm3510.c for (i = 0; i < l; i++) \ l 637 drivers/media/dvb-frontends/dib0070.c u16 l, r, *n; l 660 drivers/media/dvb-frontends/dib0070.c l = pgm_read_word(n++); l 661 drivers/media/dvb-frontends/dib0070.c while (l) { l 666 drivers/media/dvb-frontends/dib0070.c } while (--l); l 667 drivers/media/dvb-frontends/dib0070.c l = pgm_read_word(n++); l 1481 drivers/media/dvb-frontends/dib0090.c u16 l, r; l 1483 drivers/media/dvb-frontends/dib0090.c l = pgm_read_word(n++); l 1484 drivers/media/dvb-frontends/dib0090.c while (l) { l 1489 drivers/media/dvb-frontends/dib0090.c } while (--l); l 1490 drivers/media/dvb-frontends/dib0090.c l = pgm_read_word(n++); l 134 drivers/media/dvb-frontends/dib7000m.c u16 l = 0, r, *n; l 136 drivers/media/dvb-frontends/dib7000m.c l = *n++; l 137 drivers/media/dvb-frontends/dib7000m.c while (l) { l 146 drivers/media/dvb-frontends/dib7000m.c } while (--l); l 147 drivers/media/dvb-frontends/dib7000m.c l = *n++; l 158 drivers/media/dvb-frontends/dib7000p.c u16 l = 0, r, *n; l 160 drivers/media/dvb-frontends/dib7000p.c l = *n++; l 161 drivers/media/dvb-frontends/dib7000p.c while (l) { l 167 drivers/media/dvb-frontends/dib7000p.c } while (--l); l 168 drivers/media/dvb-frontends/dib7000p.c l = *n++; l 1103 drivers/media/dvb-frontends/dib8000.c u16 l = 0, r; l 1106 drivers/media/dvb-frontends/dib8000.c l = *n++; l 1107 drivers/media/dvb-frontends/dib8000.c while (l) { l 1112 drivers/media/dvb-frontends/dib8000.c } while (--l); l 1113 drivers/media/dvb-frontends/dib8000.c l = *n++; l 3580 drivers/media/dvb-frontends/dib8000.c int l, i, active, time, time_slave = 0; l 3682 drivers/media/dvb-frontends/dib8000.c for (l = 0; (l < MAX_NUMBER_OF_FRONTENDS) && (state->fe[l] != NULL); l++) { l 3683 drivers/media/dvb-frontends/dib8000.c if (l != index_frontend) { /* and for all frontend except the successful one */ l 3684 drivers/media/dvb-frontends/dib8000.c dprintk("Restarting frontend %d\n", l); l 3685 drivers/media/dvb-frontends/dib8000.c dib8000_tune_restart_from_demod(state->fe[l]); l 3687 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.isdbt_sb_mode = state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode; l 3688 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.inversion = state->fe[index_frontend]->dtv_property_cache.inversion; l 3689 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.transmission_mode = state->fe[index_frontend]->dtv_property_cache.transmission_mode; l 3690 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.guard_interval = state->fe[index_frontend]->dtv_property_cache.guard_interval; l 3691 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.isdbt_partial_reception = state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception; l 3693 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.layer[i].segment_count = state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count; l 3694 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.layer[i].interleaving = 
state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving; l 3695 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.layer[i].fec = state->fe[index_frontend]->dtv_property_cache.layer[i].fec; l 3696 drivers/media/dvb-frontends/dib8000.c state->fe[l]->dtv_property_cache.layer[i].modulation = state->fe[index_frontend]->dtv_property_cache.layer[i].modulation; l 236 drivers/media/dvb-frontends/dib9000.c u32 l; l 261 drivers/media/dvb-frontends/dib9000.c l = len < chunk_size ? len : chunk_size; l 262 drivers/media/dvb-frontends/dib9000.c state->msg[1].len = l; l 270 drivers/media/dvb-frontends/dib9000.c b += l; l 271 drivers/media/dvb-frontends/dib9000.c len -= l; l 274 drivers/media/dvb-frontends/dib9000.c reg += l / 2; l 320 drivers/media/dvb-frontends/dib9000.c u32 l; l 345 drivers/media/dvb-frontends/dib9000.c l = len < chunk_size ? len : chunk_size; l 346 drivers/media/dvb-frontends/dib9000.c state->msg[0].len = l + 2; l 347 drivers/media/dvb-frontends/dib9000.c memcpy(&state->i2c_write_buffer[2], buf, l); l 351 drivers/media/dvb-frontends/dib9000.c buf += l; l 352 drivers/media/dvb-frontends/dib9000.c len -= l; l 355 drivers/media/dvb-frontends/dib9000.c reg += l / 2; l 23 drivers/media/dvb-frontends/drxd_firm.c #define WRBLOCK(a, l) ADDRESS(a), LENGTH(l) l 3569 drivers/media/dvb-frontends/stv090x.c u32 reg, h, m, l; l 3584 drivers/media/dvb-frontends/stv090x.c l = STV090x_GETFIELD_Px(reg, ERR_CNT20_FIELD); l 3586 drivers/media/dvb-frontends/stv090x.c *per = ((h << 16) | (m << 8) | l); l 220 drivers/media/i2c/cx25840/cx25840-vbi.c int id1, id2, l, err = 0; l 231 drivers/media/i2c/cx25840/cx25840-vbi.c l = p[2] & 0x3f; l 232 drivers/media/i2c/cx25840/cx25840-vbi.c l += state->vbi_line_offset; l 258 drivers/media/i2c/cx25840/cx25840-vbi.c vbi->line = err ? 0 : l; l 500 drivers/media/i2c/ir-kbd-i2c.c int rep, i, l, p = 0, s, c = 0; l 522 drivers/media/i2c/ir-kbd-i2c.c l = DIV_ROUND_CLOSEST((XTAL_FREQ / 1000) * txbuf[i], 40000); l 526 drivers/media/i2c/ir-kbd-i2c.c ARRAY_SIZE(code_block->space), l); l 536 drivers/media/i2c/ir-kbd-i2c.c ARRAY_SIZE(code_block->pulse), l); l 252 drivers/media/pci/cx18/cx18-av-vbi.c int did, sdid, l, err = 0; l 267 drivers/media/pci/cx18/cx18-av-vbi.c l = anc->idid[0] & 0x3f; l 268 drivers/media/pci/cx18/cx18-av-vbi.c l += state->slicer_line_offset; l 295 drivers/media/pci/cx18/cx18-av-vbi.c vbi->line = err ? 
0 : l; l 94 drivers/media/pci/cx18/cx18-ioctl.c int f, l; l 98 drivers/media/pci/cx18/cx18-ioctl.c for (l = 0; l < 24; l++) l 99 drivers/media/pci/cx18/cx18-ioctl.c fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal); l 109 drivers/media/pci/cx18/cx18-ioctl.c int f, l; l 113 drivers/media/pci/cx18/cx18-ioctl.c for (l = 0; l < 24; l++) { l 114 drivers/media/pci/cx18/cx18-ioctl.c fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal); l 115 drivers/media/pci/cx18/cx18-ioctl.c set |= fmt->service_lines[f][l]; l 124 drivers/media/pci/cx18/cx18-ioctl.c int f, l; l 128 drivers/media/pci/cx18/cx18-ioctl.c for (l = 0; l < 24; l++) l 129 drivers/media/pci/cx18/cx18-ioctl.c set |= fmt->service_lines[f][l]; l 635 drivers/media/pci/cx18/cx18-ioctl.c int f, l; l 642 drivers/media/pci/cx18/cx18-ioctl.c for (l = 0; l < 24; l++) { l 643 drivers/media/pci/cx18/cx18-ioctl.c if (valid_service_line(f, l, cx->is_50hz)) { l 648 drivers/media/pci/cx18/cx18-ioctl.c cap->service_lines[f][l] = set; l 651 drivers/media/pci/cx18/cx18-ioctl.c cap->service_lines[f][l] = 0; l 50 drivers/media/pci/cx18/cx18-vbi.c int f, l; l 55 drivers/media/pci/cx18/cx18-vbi.c l = sdata->line - 6; l 58 drivers/media/pci/cx18/cx18-vbi.c l += 18; l 59 drivers/media/pci/cx18/cx18-vbi.c if (l < 32) l 60 drivers/media/pci/cx18/cx18-vbi.c linemask[0] |= (1 << l); l 62 drivers/media/pci/cx18/cx18-vbi.c linemask[1] |= (1 << (l - 32)); l 92 drivers/media/pci/cx88/cx88-tvaudio.c static void set_audio_registers(struct cx88_core *core, const struct rlist *l) l 96 drivers/media/pci/cx88/cx88-tvaudio.c for (i = 0; l[i].reg; i++) { l 97 drivers/media/pci/cx88/cx88-tvaudio.c switch (l[i].reg) { l 104 drivers/media/pci/cx88/cx88-tvaudio.c cx_writeb(l[i].reg, l[i].val); l 107 drivers/media/pci/cx88/cx88-tvaudio.c cx_write(l[i].reg, l[i].val); l 1848 drivers/media/pci/ddbridge/ddbridge-core.c u32 l = port->lnr; l 1849 drivers/media/pci/ddbridge/ddbridge-core.c struct ddb_link *link = &dev->link[l]; l 2367 drivers/media/pci/ddbridge/ddbridge-core.c u32 i, l, p; l 2372 drivers/media/pci/ddbridge/ddbridge-core.c for (p = l = 0; l < DDB_MAX_LINK; l++) { l 2373 drivers/media/pci/ddbridge/ddbridge-core.c info = dev->link[l].info; l 2383 drivers/media/pci/ddbridge/ddbridge-core.c port->lnr = l; l 2428 drivers/media/pci/ddbridge/ddbridge-core.c switch (dev->link[l].info->type) { l 3291 drivers/media/pci/ddbridge/ddbridge-core.c u32 l = link->nr; l 3301 drivers/media/pci/ddbridge/ddbridge-core.c ddb_irq_set(dev, l, link->info->tempmon_irq, temp_handler, link); l 3341 drivers/media/pci/ddbridge/ddbridge-core.c u32 l; l 3343 drivers/media/pci/ddbridge/ddbridge-core.c for (l = 0; l < DDB_MAX_LINK; l++) { l 3344 drivers/media/pci/ddbridge/ddbridge-core.c link = &dev->link[l]; l 3350 drivers/media/pci/ddbridge/ddbridge-core.c ddbwritel(dev, 0, DDB_LINK_TAG(l) | BOARD_CONTROL); l 3353 drivers/media/pci/ddbridge/ddbridge-core.c DDB_LINK_TAG(l) | BOARD_CONTROL); l 3357 drivers/media/pci/ddbridge/ddbridge-core.c DDB_LINK_TAG(l) | BOARD_CONTROL); l 200 drivers/media/pci/ddbridge/ddbridge-i2c.c u32 i, j, num = 0, l, base; l 205 drivers/media/pci/ddbridge/ddbridge-i2c.c for (l = 0; l < DDB_MAX_LINK; l++) { l 206 drivers/media/pci/ddbridge/ddbridge-i2c.c if (!dev->link[l].info) l 208 drivers/media/pci/ddbridge/ddbridge-i2c.c regmap = dev->link[l].info->regmap; l 213 drivers/media/pci/ddbridge/ddbridge-i2c.c if (!(dev->link[l].info->i2c_mask & (1 << i))) l 216 drivers/media/pci/ddbridge/ddbridge-i2c.c ddb_irq_set(dev, l, i + 
base, i2c_handler, i2c); l 217 drivers/media/pci/ddbridge/ddbridge-i2c.c stat = ddb_i2c_add(dev, i2c, regmap, l, i, num); l 366 drivers/media/pci/ddbridge/ddbridge-max.c u32 l = link->nr; l 370 drivers/media/pci/ddbridge/ddbridge-max.c dev_info(dev->dev, "Set fmode link %u = %u\n", l, fm); l 374 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_sat(dev, l, 0, fmode_sat, 0, 0); l 376 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_sat(dev, l, 1, fmode_sat, 0, 1); l 377 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_sat(dev, l, 2, fmode_sat, 1, 0); l 379 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_sat(dev, l, 1, fmode_sat, 1, 0); l 380 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_sat(dev, l, 2, fmode_sat, 0, 1); l 382 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_sat(dev, l, 3, fmode_sat, 1, 1); l 384 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_tone(dev, l, 0, SEC_TONE_OFF); l 386 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_tone(dev, l, 1, SEC_TONE_OFF); l 387 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_tone(dev, l, 2, SEC_TONE_ON); l 389 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_tone(dev, l, 1, SEC_TONE_ON); l 390 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_tone(dev, l, 2, SEC_TONE_OFF); l 392 drivers/media/pci/ddbridge/ddbridge-max.c lnb_set_tone(dev, l, 3, SEC_TONE_ON); l 85 drivers/media/pci/ivtv/ivtv-ioctl.c int f, l; l 89 drivers/media/pci/ivtv/ivtv-ioctl.c for (l = 0; l < 24; l++) { l 90 drivers/media/pci/ivtv/ivtv-ioctl.c fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal); l 97 drivers/media/pci/ivtv/ivtv-ioctl.c int f, l; l 100 drivers/media/pci/ivtv/ivtv-ioctl.c for (l = 0; l < 24; l++) { l 101 drivers/media/pci/ivtv/ivtv-ioctl.c fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal); l 108 drivers/media/pci/ivtv/ivtv-ioctl.c int f, l; l 112 drivers/media/pci/ivtv/ivtv-ioctl.c for (l = 0; l < 24; l++) { l 113 drivers/media/pci/ivtv/ivtv-ioctl.c set |= fmt->service_lines[f][l]; l 1223 drivers/media/pci/ivtv/ivtv-ioctl.c int f, l; l 1227 drivers/media/pci/ivtv/ivtv-ioctl.c for (l = 0; l < 24; l++) { l 1228 drivers/media/pci/ivtv/ivtv-ioctl.c if (valid_service_line(f, l, itv->is_50hz)) l 1229 drivers/media/pci/ivtv/ivtv-ioctl.c cap->service_lines[f][l] = set; l 1248 drivers/media/pci/ivtv/ivtv-ioctl.c for (l = 0; l < 24; l++) l 1249 drivers/media/pci/ivtv/ivtv-ioctl.c set |= cap->service_lines[f][l]; l 191 drivers/media/pci/ivtv/ivtv-vbi.c int f, l; l 196 drivers/media/pci/ivtv/ivtv-vbi.c l = itv->vbi.sliced_data[i].line - 6; l 199 drivers/media/pci/ivtv/ivtv-vbi.c l += 18; l 200 drivers/media/pci/ivtv/ivtv-vbi.c if (l < 32) l 201 drivers/media/pci/ivtv/ivtv-vbi.c linemask[0] |= (1 << l); l 203 drivers/media/pci/ivtv/ivtv-vbi.c linemask[1] |= (1 << (l - 32)); l 237 drivers/media/pci/ivtv/ivtv-vbi.c int i, l, id2; l 278 drivers/media/pci/ivtv/ivtv-vbi.c l = (i < 18) ? 
i + 6 : i - 18 + 6; l 279 drivers/media/pci/ivtv/ivtv-vbi.c itv->vbi.sliced_dec_data[line].line = l; l 134 drivers/media/pci/ngene/ngene-dvb.c int i, l; l 136 drivers/media/pci/ngene/ngene-dvb.c l = len - sizeof(fill_ts); l 137 drivers/media/pci/ngene/ngene-dvb.c if (l <= 0) l 140 drivers/media/pci/ngene/ngene-dvb.c for (i = 0; i < l; i++) { l 61 drivers/media/pci/saa7164/saa7164-encoder.c struct list_head *c, *n, *p, *q, *l, *v; l 84 drivers/media/pci/saa7164/saa7164-encoder.c list_for_each_safe(l, v, &port->list_buf_free.list) { l 85 drivers/media/pci/saa7164/saa7164-encoder.c ubuf = list_entry(l, struct saa7164_user_buffer, list); l 86 drivers/media/pci/saa7164/saa7164-encoder.c list_del(l); l 30 drivers/media/pci/saa7164/saa7164-vbi.c struct list_head *c, *n, *p, *q, *l, *v; l 53 drivers/media/pci/saa7164/saa7164-vbi.c list_for_each_safe(l, v, &port->list_buf_free.list) { l 54 drivers/media/pci/saa7164/saa7164-vbi.c ubuf = list_entry(l, struct saa7164_user_buffer, list); l 55 drivers/media/pci/saa7164/saa7164-vbi.c list_del(l); l 633 drivers/media/pci/ttpci/av7110_av.c int c, c2, l, add; l 691 drivers/media/pci/ttpci/av7110_av.c l = c2+c; l 693 drivers/media/pci/ttpci/av7110_av.c l = (TS_SIZE - 4) - p->pos; l 694 drivers/media/pci/ttpci/av7110_av.c memcpy(p->pes + p->pos, buf, l); l 695 drivers/media/pci/ttpci/av7110_av.c c += l; l 696 drivers/media/pci/ttpci/av7110_av.c p->pos += l; l 714 drivers/media/pci/ttpci/av7110_av.c l = length - c; l 715 drivers/media/pci/ttpci/av7110_av.c rest = l % (TS_SIZE - 4); l 716 drivers/media/pci/ttpci/av7110_av.c l -= rest; l 717 drivers/media/pci/ttpci/av7110_av.c p_to_t(buf + c, l, pid, &p->counter, p->feed); l 718 drivers/media/pci/ttpci/av7110_av.c memcpy(p->pes, buf + c + l, rest); l 764 drivers/media/pci/ttpci/av7110_av.c int l, pes_start; l 793 drivers/media/pci/ttpci/av7110_av.c l = write_ts_header2(pid, counter, pes_start, l 795 drivers/media/pci/ttpci/av7110_av.c memcpy(obuf + l, buf + c, TS_SIZE - l); l 796 drivers/media/pci/ttpci/av7110_av.c c += TS_SIZE - l; l 798 drivers/media/pci/ttpci/av7110_av.c l = write_ts_header2(pid, counter, pes_start, l 800 drivers/media/pci/ttpci/av7110_av.c memcpy(obuf + l, buf + c, TS_SIZE - l); l 148 drivers/media/pci/ttpci/av7110_ipack.c int l; l 375 drivers/media/pci/ttpci/av7110_ipack.c l = count - c; l 376 drivers/media/pci/ttpci/av7110_ipack.c if (l + p->found > p->plength + 6) l 377 drivers/media/pci/ttpci/av7110_ipack.c l = p->plength + 6 - p->found; l 378 drivers/media/pci/ttpci/av7110_ipack.c write_ipack(p, buf + c, l); l 379 drivers/media/pci/ttpci/av7110_ipack.c p->found += l; l 380 drivers/media/pci/ttpci/av7110_ipack.c c += l; l 90 drivers/media/platform/am437x/am437x-vpfe.c struct bus_format l; l 100 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 101 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 4, l 108 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 109 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 4, l 116 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 117 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 4, l 124 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 125 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 4, l 132 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 133 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 2, l 140 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 141 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 2, l 148 drivers/media/platform/am437x/am437x-vpfe.c 
.l.width = 10, l 149 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 2, l 156 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 157 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 2, l 164 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 165 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 4, l 172 drivers/media/platform/am437x/am437x-vpfe.c .l.width = 10, l 173 drivers/media/platform/am437x/am437x-vpfe.c .l.bpp = 4, l 231 drivers/media/platform/am437x/am437x-vpfe.c *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp; l 90 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c u8 val, l = 0; l 110 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c l = c->clk.pos; l 112 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c l = c->data[i].pos; l 115 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c CAMSS_CSI_PHY_LNn_CFG2(l)); l 117 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c CAMSS_CSI_PHY_LNn_CFG3(l)); l 119 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c CAMSS_CSI_PHY_INTERRUPT_MASKn(l)); l 121 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c CAMSS_CSI_PHY_INTERRUPT_CLEARn(l)); l 129 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c u8 l = 0; l 134 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c l = c->clk.pos; l 136 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c l = c->data[i].pos; l 139 drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c CAMSS_CSI_PHY_LNn_CFG2(l)); l 142 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c u8 val, l = 0; l 159 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c l = 7; l 161 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c l = c->data[i].pos * 2; l 165 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l)); l 168 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG2(l)); l 171 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG3(l)); l 175 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG5(l)); l 178 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG6(l)); l 181 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG7(l)); l 185 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG8(l)); l 188 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG9(l)); l 191 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_TEST_IMP(l)); l 195 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c CSIPHY_3PH_LNn_CSI_LANE_CTRL15(l)); l 199 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l)); l 202 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG4(l)); l 205 drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_MISC1(l)); l 1140 drivers/media/platform/qcom/camss/camss-vfe.c struct vfe_line *l = &vfe->line[j]; l 1142 drivers/media/platform/qcom/camss/camss-vfe.c bpp = vfe_get_bpp(l->formats, l 1143 drivers/media/platform/qcom/camss/camss-vfe.c 
l->nformats, l 1144 drivers/media/platform/qcom/camss/camss-vfe.c l->fmt[MSM_VFE_PAD_SINK].code); l 1222 drivers/media/platform/qcom/camss/camss-vfe.c struct vfe_line *l = &vfe->line[j]; l 1224 drivers/media/platform/qcom/camss/camss-vfe.c bpp = vfe_get_bpp(l->formats, l 1225 drivers/media/platform/qcom/camss/camss-vfe.c l->nformats, l 1226 drivers/media/platform/qcom/camss/camss-vfe.c l->fmt[MSM_VFE_PAD_SINK].code); l 2084 drivers/media/platform/qcom/camss/camss-vfe.c struct vfe_line *l = &vfe->line[i]; l 2086 drivers/media/platform/qcom/camss/camss-vfe.c l->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; l 2087 drivers/media/platform/qcom/camss/camss-vfe.c l->video_out.camss = camss; l 2088 drivers/media/platform/qcom/camss/camss-vfe.c l->id = i; l 2089 drivers/media/platform/qcom/camss/camss-vfe.c init_completion(&l->output.sof); l 2090 drivers/media/platform/qcom/camss/camss-vfe.c init_completion(&l->output.reg_update); l 2094 drivers/media/platform/qcom/camss/camss-vfe.c l->formats = formats_pix_8x16; l 2095 drivers/media/platform/qcom/camss/camss-vfe.c l->nformats = ARRAY_SIZE(formats_pix_8x16); l 2097 drivers/media/platform/qcom/camss/camss-vfe.c l->formats = formats_rdi_8x16; l 2098 drivers/media/platform/qcom/camss/camss-vfe.c l->nformats = ARRAY_SIZE(formats_rdi_8x16); l 2102 drivers/media/platform/qcom/camss/camss-vfe.c l->formats = formats_pix_8x96; l 2103 drivers/media/platform/qcom/camss/camss-vfe.c l->nformats = ARRAY_SIZE(formats_pix_8x96); l 2105 drivers/media/platform/qcom/camss/camss-vfe.c l->formats = formats_rdi_8x96; l 2106 drivers/media/platform/qcom/camss/camss-vfe.c l->nformats = ARRAY_SIZE(formats_rdi_8x96); l 621 drivers/media/platform/vicodec/codec-fwht.c unsigned int k, l; l 630 drivers/media/platform/vicodec/codec-fwht.c for (l = 0; l < 8; l++) { l 663 drivers/media/platform/vicodec/codec-fwht.c int k, l; l 666 drivers/media/platform/vicodec/codec-fwht.c for (l = 0; l < 8; l++) { l 50 drivers/media/platform/vivid/vivid-rds-gen.c int l; l 95 drivers/media/platform/vivid/vivid-rds-gen.c l = tm.tm_mon <= 1; l 96 drivers/media/platform/vivid/vivid-rds-gen.c date = 14956 + tm.tm_mday + ((tm.tm_year - l) * 1461) / 4 + l 97 drivers/media/platform/vivid/vivid-rds-gen.c ((tm.tm_mon + 2 + l * 12) * 306001) / 10000; l 84 drivers/media/radio/tea575x.c u16 l; l 93 drivers/media/radio/tea575x.c for (l = 25; l > 0; l--) { l 110 drivers/media/radio/tea575x.c u16 l, rdata; l 120 drivers/media/radio/tea575x.c for (l = 24; l--;) { l 123 drivers/media/radio/tea575x.c if (!l) l 129 drivers/media/radio/tea575x.c if (!l) l 264 drivers/media/rc/serial_ir.c static void frbwrite(unsigned int l, bool is_pulse) l 271 drivers/media/rc/serial_ir.c pulse += l; l 286 drivers/media/rc/serial_ir.c if (l > 20000000) { l 287 drivers/media/rc/serial_ir.c space = l; l 292 drivers/media/rc/serial_ir.c if (l > 20000000) { l 296 drivers/media/rc/serial_ir.c space += l; l 314 drivers/media/rc/serial_ir.c ev.duration = l; l 21 drivers/media/usb/b2c2/flexcop-usb.c #define debug_dump(b, l, method) do {\ l 23 drivers/media/usb/b2c2/flexcop-usb.c for (i = 0; i < l; i++) \ l 31 drivers/media/usb/b2c2/flexcop-usb.c #define debug_dump(b, l, method) l 333 drivers/media/usb/b2c2/flexcop-usb.c int l; l 343 drivers/media/usb/b2c2/flexcop-usb.c l = fc_usb->tmp_buffer_length; l 346 drivers/media/usb/b2c2/flexcop-usb.c l=buffer_length; l 349 drivers/media/usb/b2c2/flexcop-usb.c while (l >= 190) { l 359 drivers/media/usb/b2c2/flexcop-usb.c l -= 190; l 363 drivers/media/usb/b2c2/flexcop-usb.c l = 0; l 368 
drivers/media/usb/b2c2/flexcop-usb.c l = 0; l 372 drivers/media/usb/b2c2/flexcop-usb.c if (l>0) l 373 drivers/media/usb/b2c2/flexcop-usb.c memcpy(fc_usb->tmp_buffer, b, l); l 374 drivers/media/usb/b2c2/flexcop-usb.c fc_usb->tmp_buffer_length = l; l 54 drivers/media/usb/dvb-usb-v2/dvb_usb.h #define dvb_usb_dbg_usb_control_msg(udev, r, t, v, i, b, l) { \ l 62 drivers/media/usb/dvb-usb-v2/dvb_usb.h i & 0xff, i >> 8, l & 0xff, l >> 8, direction, l, b); \ l 34 drivers/media/usb/dvb-usb/dvb-usb.h #define debug_dump(b,l,func) {\ l 36 drivers/media/usb/dvb-usb/dvb-usb.h for (loop_ = 0; loop_ < l; loop_++) func("%02x ", b[loop_]); \ l 42 drivers/media/usb/dvb-usb/dvb-usb.h #define debug_dump(b,l,func) l 614 drivers/media/usb/em28xx/em28xx-i2c.c unsigned long l = 0; l 625 drivers/media/usb/em28xx/em28xx-i2c.c l = (l << 8) | c; l 628 drivers/media/usb/em28xx/em28xx-i2c.c hash = ((hash ^ l) * 0x9e370001UL); l 562 drivers/media/usb/gspca/cpia1.c u8 i, u8 j, u8 k, u8 l) l 581 drivers/media/usb/gspca/cpia1.c gspca_dev->usb_buf[7] = l; l 1442 drivers/media/usb/gspca/ov534_9.c int l; l 1467 drivers/media/usb/gspca/ov534_9.c l = strlen(p) - 1; l 1468 drivers/media/usb/gspca/ov534_9.c if (p[l] == '0') l 1011 drivers/media/usb/gspca/topro.c const struct cmd *p, int l) l 1016 drivers/media/usb/gspca/topro.c } while (--l > 0); l 1038 drivers/media/usb/gspca/topro.c const struct cmd *p, int l) l 1043 drivers/media/usb/gspca/topro.c } while (--l > 0); l 228 drivers/media/usb/gspca/touptek.c const struct cmd *p, int l) l 233 drivers/media/usb/gspca/touptek.c } while (--l > 0); l 3638 drivers/media/usb/gspca/vc032x.c int size, l; l 3640 drivers/media/usb/gspca/vc032x.c l = gspca_dev->image_len; l 3642 drivers/media/usb/gspca/vc032x.c if (len > size - l) l 3643 drivers/media/usb/gspca/vc032x.c len = size - l; l 111 drivers/memory/omap-gpmc.c #define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf) l 112 drivers/memory/omap-gpmc.c #define GPMC_REVISION_MINOR(l) (l & 0xf) l 304 drivers/memory/omap-gpmc.c u32 l; l 310 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); l 311 drivers/memory/omap-gpmc.c div = (l & 0x03) + 1; l 376 drivers/memory/omap-gpmc.c u32 l; l 378 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, reg); l 380 drivers/memory/omap-gpmc.c l |= mask; l 382 drivers/memory/omap-gpmc.c l &= ~mask; l 383 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, reg, l); l 437 drivers/memory/omap-gpmc.c u32 l; l 442 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, reg); l 445 drivers/memory/omap-gpmc.c l = (l >> st_bit) & mask; l 448 drivers/memory/omap-gpmc.c invalid = l > max; l 450 drivers/memory/omap-gpmc.c l = (shift << l); l 451 drivers/memory/omap-gpmc.c if (noval && (l == 0)) l 458 drivers/memory/omap-gpmc.c if (l) l 459 drivers/memory/omap-gpmc.c time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1; l 460 drivers/memory/omap-gpmc.c time_ns = gpmc_clk_ticks_to_ns(l, cs, cd); l 462 drivers/memory/omap-gpmc.c name, time_ns, time_ns_min, time_ns, l, l 466 drivers/memory/omap-gpmc.c pr_info("gpmc,%s = <%u>;%s\n", name, l, l 470 drivers/memory/omap-gpmc.c return l; l 606 drivers/memory/omap-gpmc.c u32 l; l 626 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, reg); l 631 drivers/memory/omap-gpmc.c (l >> st_bit) & mask, time); l 633 drivers/memory/omap-gpmc.c l &= ~(mask << st_bit); l 634 drivers/memory/omap-gpmc.c l |= ticks << st_bit; l 635 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, reg, l); l 709 drivers/memory/omap-gpmc.c u32 l; l 778 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, 
GPMC_CS_CONFIG1); l 779 drivers/memory/omap-gpmc.c l &= ~0x03; l 780 drivers/memory/omap-gpmc.c l |= (div - 1); l 781 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l); l 803 drivers/memory/omap-gpmc.c u32 l; l 818 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); l 819 drivers/memory/omap-gpmc.c l &= ~GPMC_CONFIG7_MASK; l 820 drivers/memory/omap-gpmc.c l |= base & GPMC_CONFIG7_BASEADDRESS_MASK; l 821 drivers/memory/omap-gpmc.c l |= mask & GPMC_CONFIG7_MASKADDRESS_MASK; l 822 drivers/memory/omap-gpmc.c l |= GPMC_CONFIG7_CSVALID; l 823 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); l 830 drivers/memory/omap-gpmc.c u32 l; l 832 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); l 833 drivers/memory/omap-gpmc.c l |= GPMC_CONFIG7_CSVALID; l 834 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); l 839 drivers/memory/omap-gpmc.c u32 l; l 841 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); l 842 drivers/memory/omap-gpmc.c l &= ~GPMC_CONFIG7_CSVALID; l 843 drivers/memory/omap-gpmc.c gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l); l 848 drivers/memory/omap-gpmc.c u32 l; l 851 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); l 852 drivers/memory/omap-gpmc.c *base = (l & 0x3f) << GPMC_CHUNK_SHIFT; l 853 drivers/memory/omap-gpmc.c mask = (l >> 8) & 0x0f; l 859 drivers/memory/omap-gpmc.c u32 l; l 861 drivers/memory/omap-gpmc.c l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); l 862 drivers/memory/omap-gpmc.c return l & GPMC_CONFIG7_CSVALID; l 2347 drivers/memory/omap-gpmc.c u32 l; l 2400 drivers/memory/omap-gpmc.c l = gpmc_read_reg(GPMC_REVISION); l 2414 drivers/memory/omap-gpmc.c if (GPMC_REVISION_MAJOR(l) > 0x4) l 2416 drivers/memory/omap-gpmc.c if (GPMC_REVISION_MAJOR(l) > 0x5) l 2418 drivers/memory/omap-gpmc.c dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l), l 2419 drivers/memory/omap-gpmc.c GPMC_REVISION_MINOR(l)); l 589 drivers/message/fusion/lsi/mpi.h #define MPI_SGE_SET_FLAGS_LENGTH(f,l) (MPI_SGE_SET_FLAGS(f) | MPI_SGE_LENGTH(l)) l 593 drivers/message/fusion/lsi/mpi.h #define MPI_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI_SGE_SET_FLAGS_LENGTH(f,l) l 596 drivers/message/fusion/lsi/mpi.h #define MPI_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI_SGE_LENGTH(l) l 969 drivers/message/fusion/mptlan.c int i, l; l 1026 drivers/message/fusion/mptlan.c l = priv->RcvCtl[ctx].len; l 1027 drivers/message/fusion/mptlan.c if (szrem < l) l 1028 drivers/message/fusion/mptlan.c l = szrem; l 1038 drivers/message/fusion/mptlan.c skb_copy_from_linear_data(old_skb, skb_put(skb, l), l); l 1046 drivers/message/fusion/mptlan.c szrem -= l; l 263 drivers/mfd/dm355evm_msp.c #define GPIO_LED(l) .name = l, .active_low = true l 615 drivers/mfd/ucb1x00-core.c struct list_head *l, *n; l 619 drivers/mfd/ucb1x00-core.c list_for_each_safe(l, n, &ucb->devs) { l 620 drivers/mfd/ucb1x00-core.c struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node); l 652 drivers/mfd/ucb1x00-core.c struct list_head *n, *l; l 656 drivers/mfd/ucb1x00-core.c list_for_each_safe(l, n, &drv->devs) { l 657 drivers/mfd/ucb1x00-core.c struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node); l 214 drivers/misc/sgi-xp/xpc_partition.c int l; l 233 drivers/misc/sgi-xp/xpc_partition.c for (l = 0; l < xpc_nasid_mask_nlongs; l++) l 234 drivers/misc/sgi-xp/xpc_partition.c discovered_nasids[l] |= remote_part_nasids[l]; l 1360 drivers/mmc/host/vub300.c int l = 
snprintf(vub300->vub_name, sizeof(vub300->vub_name), l 1366 drivers/mmc/host/vub300.c l += snprintf(vub300->vub_name + l, l 1367 drivers/mmc/host/vub300.c sizeof(vub300->vub_name) - l, "_%04X%04X", l 1370 drivers/mmc/host/vub300.c snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin"); l 454 drivers/mtd/devices/mtd_dataflash.c size_t l; l 467 drivers/mtd/devices/mtd_dataflash.c l = 4 + base + off + len; l 468 drivers/mtd/devices/mtd_dataflash.c scratch = kzalloc(l, GFP_KERNEL); l 480 drivers/mtd/devices/mtd_dataflash.c t.len = l; l 533 drivers/mtd/devices/mtd_dataflash.c const size_t l = 4 + 64; l 555 drivers/mtd/devices/mtd_dataflash.c scratch = kzalloc(l, GFP_KERNEL); l 565 drivers/mtd/devices/mtd_dataflash.c t.len = l; l 2174 drivers/mtd/nand/raw/marvell_nand.c const struct marvell_hw_ecc_layout *l; l 2187 drivers/mtd/nand/raw/marvell_nand.c l = &marvell_nfc_layouts[i]; l 2188 drivers/mtd/nand/raw/marvell_nand.c if (mtd->writesize == l->writesize && l 2189 drivers/mtd/nand/raw/marvell_nand.c ecc->size == l->chunk && ecc->strength == l->strength) { l 2190 drivers/mtd/nand/raw/marvell_nand.c to_marvell_nand(chip)->layout = l; l 2204 drivers/mtd/nand/raw/marvell_nand.c if (l->writesize == 2048 && l->strength == 8) { l 2214 drivers/mtd/nand/raw/marvell_nand.c ecc->steps = l->nchunks; l 2215 drivers/mtd/nand/raw/marvell_nand.c ecc->size = l->data_bytes; l 451 drivers/mtd/nand/raw/mpc5121_nfc.c uint l; l 468 drivers/mtd/nand/raw/mpc5121_nfc.c l = min((uint)len, mtd->writesize - c); l 469 drivers/mtd/nand/raw/mpc5121_nfc.c prv->column += l; l 472 drivers/mtd/nand/raw/mpc5121_nfc.c memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l); l 474 drivers/mtd/nand/raw/mpc5121_nfc.c memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l); l 477 drivers/mtd/nand/raw/mpc5121_nfc.c if (l != len) { l 478 drivers/mtd/nand/raw/mpc5121_nfc.c buf += l; l 479 drivers/mtd/nand/raw/mpc5121_nfc.c len -= l; l 1376 drivers/mtd/nand/raw/sunxi_nand.c #define sunxi_nand_lookup_timing(l, p, c) \ l 1377 drivers/mtd/nand/raw/sunxi_nand.c _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c) l 3228 drivers/mtd/spi-nor/spi-nor.c static int spi_nor_map_cmp_erase_type(const void *l, const void *r) l 3230 drivers/mtd/spi-nor/spi-nor.c const struct spi_nor_erase_type *left = l, *right = r; l 25 drivers/mtd/ubi/debug.h #define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \ l 26 drivers/mtd/ubi/debug.h print_hex_dump(l, ps, pt, r, g, b, len, a) l 444 drivers/mtd/ubi/gluebi.c static int gluebi_notify(struct notifier_block *nb, unsigned long l, l 449 drivers/mtd/ubi/gluebi.c switch (l) { l 225 drivers/mtd/ubi/upd.c int l = ALIGN(len, ubi->min_io_size); l 227 drivers/mtd/ubi/upd.c memset(buf + len, 0xFF, l - len); l 228 drivers/mtd/ubi/upd.c len = ubi_calc_data_len(ubi, buf, l); l 55 drivers/net/can/usb/peak_usb/pcan_usb_core.c void pcan_dump_mem(char *prompt, void *p, int l) l 58 drivers/net/can/usb/peak_usb/pcan_usb_core.c PCAN_USB_DRIVER_NAME, prompt ? 
prompt : "memory", l); l 60 drivers/net/can/usb/peak_usb/pcan_usb_core.c DUMP_WIDTH, 1, p, l, false); l 139 drivers/net/can/usb/peak_usb/pcan_usb_core.h void pcan_dump_mem(char *prompt, void *p, int l); l 284 drivers/net/can/usb/peak_usb/pcan_usb_fd.c int l; l 288 drivers/net/can/usb/peak_usb/pcan_usb_fd.c l = pcan_usb_fd_build_restart_cmd(dev, pc); l 295 drivers/net/can/usb/peak_usb/pcan_usb_fd.c l = sizeof(struct pucan_command); l 299 drivers/net/can/usb/peak_usb/pcan_usb_fd.c return pcan_usb_fd_send_cmd(dev, pc + l); l 139 drivers/net/dsa/bcm_sf2_cfp.c static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l, l 146 drivers/net/dsa/bcm_sf2_cfp.c slice_layout = &l->udfs[slice_idx]; l 326 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c unsigned int l = 0U; l 333 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | l 337 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); l 787 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | l 793 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c l, l 368 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c unsigned int l = 0U; l 375 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | l 379 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC); l 859 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | l 865 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c l, HW_ATL_B0_MAC_MIN + i); l 654 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c u32 l = 0U; l 685 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c l = 0xE3000000U | l 690 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c mac[5] = (u8)(0xFFU & l); l 691 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c l >>= 8; l 692 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c mac[4] = (u8)(0xFFU & l); l 693 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c l >>= 8; l 694 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c mac[3] = (u8)(0xFFU & l); l 695 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c l >>= 8; l 696 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c mac[2] = (u8)(0xFFU & l); l 249 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c u32 l = 0U; l 271 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16); l 274 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c mac[5] = (u8)(0xFFU & l); l 275 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c l >>= 8; l 276 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c mac[4] = (u8)(0xFFU & l); l 277 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c l >>= 8; l 278 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c mac[3] = (u8)(0xFFU & l); l 279 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c l >>= 8; l 280 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c mac[2] = (u8)(0xFFU & l); l 110 drivers/net/ethernet/atheros/alx/hw.h } l; l 1424 drivers/net/ethernet/atheros/alx/main.c first->adrl.l.pkt_len = skb->len; l 102 drivers/net/ethernet/atheros/atl1e/atl1e_param.c } l; l 
138 drivers/net/ethernet/atheros/atl1e/atl1e_param.c for (i = 0; i < opt->arg.l.nr; i++) { l 139 drivers/net/ethernet/atheros/atl1e/atl1e_param.c ent = &opt->arg.l.p[i]; l 125 drivers/net/ethernet/atheros/atlx/atl1.c } l; l 159 drivers/net/ethernet/atheros/atlx/atl1.c for (i = 0; i < opt->arg.l.nr; i++) { l 160 drivers/net/ethernet/atheros/atlx/atl1.c ent = &opt->arg.l.p[i]; l 2895 drivers/net/ethernet/atheros/atlx/atl2.c } l; l 2927 drivers/net/ethernet/atheros/atlx/atl2.c for (i = 0; i < opt->arg.l.nr; i++) { l 2928 drivers/net/ethernet/atheros/atlx/atl2.c ent = &opt->arg.l.p[i]; l 332 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h int l; l 497 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l); l 505 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) || l 506 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) { l 508 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h REG_WR(bp, write_arb_addr[i].l, l 509 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h write_arb_data[i][w_order].l); l 518 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h val = REG_RD(bp, write_arb_addr[i].l); l 519 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h REG_WR(bp, write_arb_addr[i].l, l 520 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h val | (write_arb_data[i][w_order].l << 10)); l 534 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h val += write_arb_data[NUM_WR_Q-1][w_order].l << 17; l 539 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h val += read_arb_data[NUM_RD_Q-1][r_order].l << 17; l 616 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h #define ILT_RANGE(f, l) (((l) << 10) | f) l 12983 drivers/net/ethernet/broadcom/tg3.c int l; l 12986 drivers/net/ethernet/broadcom/tg3.c for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) l 12990 drivers/net/ethernet/broadcom/tg3.c int l; l 12993 drivers/net/ethernet/broadcom/tg3.c for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1) l 12997 drivers/net/ethernet/broadcom/tg3.c for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1) l 1697 drivers/net/ethernet/brocade/bna/bfa_ioc.c u32 off, l, s, residue, fifo_sz; l 1709 drivers/net/ethernet/brocade/bna/bfa_ioc.c l = (n + 1) * fifo_sz - s; l 1710 drivers/net/ethernet/brocade/bna/bfa_ioc.c if (l > residue) l 1711 drivers/net/ethernet/brocade/bna/bfa_ioc.c l = residue; l 1713 drivers/net/ethernet/brocade/bna/bfa_ioc.c status = bfa_flash_read_start(pci_bar, offset + off, l, l 1728 drivers/net/ethernet/brocade/bna/bfa_ioc.c bfa_flash_read_end(pci_bar, l, &buf[off]); l 1730 drivers/net/ethernet/brocade/bna/bfa_ioc.c residue -= l; l 1731 drivers/net/ethernet/brocade/bna/bfa_ioc.c off += l; l 182 drivers/net/ethernet/faraday/ftgmac100.c unsigned int l; l 194 drivers/net/ethernet/faraday/ftgmac100.c l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR); l 198 drivers/net/ethernet/faraday/ftgmac100.c mac[2] = (l >> 24) & 0xff; l 199 drivers/net/ethernet/faraday/ftgmac100.c mac[3] = (l >> 16) & 0xff; l 200 drivers/net/ethernet/faraday/ftgmac100.c mac[4] = (l >> 8) & 0xff; l 201 drivers/net/ethernet/faraday/ftgmac100.c mac[5] = l & 0xff; l 710 drivers/net/ethernet/freescale/gianfar_ethtool.c int j = MAX_FILER_IDX, l = 0x0; l 762 drivers/net/ethernet/freescale/gianfar_ethtool.c for (l = i+1; l < MAX_FILER_IDX; l++) { l 763 drivers/net/ethernet/freescale/gianfar_ethtool.c if ((priv->ftp_rqfcr[l] & RQFCR_CLE) && l 764 
drivers/net/ethernet/freescale/gianfar_ethtool.c !(priv->ftp_rqfcr[l] & RQFCR_AND)) { l 765 drivers/net/ethernet/freescale/gianfar_ethtool.c priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | l 767 drivers/net/ethernet/freescale/gianfar_ethtool.c priv->ftp_rqfpr[l] = FPR_FILER_MASK; l 768 drivers/net/ethernet/freescale/gianfar_ethtool.c gfar_write_filer(priv, l, priv->ftp_rqfcr[l], l 769 drivers/net/ethernet/freescale/gianfar_ethtool.c priv->ftp_rqfpr[l]); l 773 drivers/net/ethernet/freescale/gianfar_ethtool.c if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) && l 774 drivers/net/ethernet/freescale/gianfar_ethtool.c (priv->ftp_rqfcr[l] & RQFCR_AND)) l 777 drivers/net/ethernet/freescale/gianfar_ethtool.c local_rqfpr[j] = priv->ftp_rqfpr[l]; l 778 drivers/net/ethernet/freescale/gianfar_ethtool.c local_rqfcr[j] = priv->ftp_rqfcr[l]; l 783 drivers/net/ethernet/freescale/gianfar_ethtool.c priv->cur_filer_idx = l - 1; l 408 drivers/net/ethernet/freescale/ucc_geth.c out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); l 660 drivers/net/ethernet/freescale/ucc_geth.h u16 l; /* address (LSB) */ l 148 drivers/net/ethernet/ibm/ehea/ehea_main.c int num_fw_handles, k, l; l 189 drivers/net/ethernet/ibm/ehea/ehea_main.c for (l = 0; l < port->num_def_qps; l++) { l 190 drivers/net/ethernet/ibm/ehea/ehea_main.c struct ehea_port_res *pr = &port->port_res[l]; l 173 drivers/net/ethernet/ibm/emac/emac.h #define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16)) l 174 drivers/net/ethernet/ibm/emac/emac.h #define EMAC4_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14)) l 330 drivers/net/ethernet/ibm/emac/mal.c struct list_head *l; l 336 drivers/net/ethernet/ibm/emac/mal.c list_for_each(l, &mal->list) { l 337 drivers/net/ethernet/ibm/emac/mal.c struct mal_commac *mc = list_entry(l, struct mal_commac, list); l 395 drivers/net/ethernet/ibm/emac/mal.c struct list_head *l; l 402 drivers/net/ethernet/ibm/emac/mal.c list_for_each(l, &mal->poll_list) { l 404 drivers/net/ethernet/ibm/emac/mal.c list_entry(l, struct mal_commac, poll_list); l 413 drivers/net/ethernet/ibm/emac/mal.c list_for_each(l, &mal->poll_list) { l 415 drivers/net/ethernet/ibm/emac/mal.c list_entry(l, struct mal_commac, poll_list); l 435 drivers/net/ethernet/ibm/emac/mal.c list_for_each(l, &mal->poll_list) { l 437 drivers/net/ethernet/ibm/emac/mal.c list_entry(l, struct mal_commac, poll_list); l 1384 drivers/net/ethernet/intel/e1000/e1000_ethtool.c int i, j, k, l, lc, good_cnt, ret_val = 0; l 1399 drivers/net/ethernet/intel/e1000/e1000_ethtool.c k = l = 0; l 1418 drivers/net/ethernet/intel/e1000/e1000_ethtool.c rxdr->buffer_info[l].dma, l 1423 drivers/net/ethernet/intel/e1000/e1000_ethtool.c rxdr->buffer_info[l].rxbuf.data + l 1428 drivers/net/ethernet/intel/e1000/e1000_ethtool.c if (unlikely(++l == rxdr->count)) l 1429 drivers/net/ethernet/intel/e1000/e1000_ethtool.c l = 0; l 179 drivers/net/ethernet/intel/e1000/e1000_param.c } l; l 213 drivers/net/ethernet/intel/e1000/e1000_param.c for (i = 0; i < opt->arg.l.nr; i++) { l 214 drivers/net/ethernet/intel/e1000/e1000_param.c ent = &opt->arg.l.p[i]; l 343 drivers/net/ethernet/intel/e1000/e1000_param.c .arg = { .l = { .nr = ARRAY_SIZE(fc_list), l 555 drivers/net/ethernet/intel/e1000/e1000_param.c .arg = { .l = { .nr = ARRAY_SIZE(speed_list), l 577 drivers/net/ethernet/intel/e1000/e1000_param.c .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), l 633 drivers/net/ethernet/intel/e1000/e1000_param.c .arg = { .l = { .nr = ARRAY_SIZE(an_list), l 1632 drivers/net/ethernet/intel/e1000e/ethtool.c int i, j, k, l; l 1651 
drivers/net/ethernet/intel/e1000e/ethtool.c l = 0; l 1674 drivers/net/ethernet/intel/e1000e/ethtool.c buffer_info = &rx_ring->buffer_info[l]; l 1684 drivers/net/ethernet/intel/e1000e/ethtool.c l++; l 1685 drivers/net/ethernet/intel/e1000e/ethtool.c if (l == rx_ring->count) l 1686 drivers/net/ethernet/intel/e1000e/ethtool.c l = 0; l 159 drivers/net/ethernet/intel/e1000e/param.c } l; l 196 drivers/net/ethernet/intel/e1000e/param.c for (i = 0; i < opt->arg.l.nr; i++) { l 197 drivers/net/ethernet/intel/e1000e/param.c ent = &opt->arg.l.p[i]; l 99 drivers/net/ethernet/intel/igb/igb.h struct list_head l; l 3561 drivers/net/ethernet/intel/igb/igb_main.c INIT_LIST_HEAD(&adapter->vf_macs.l); l 3568 drivers/net/ethernet/intel/igb/igb_main.c list_add(&mac_list->l, &adapter->vf_macs.l); l 7360 drivers/net/ethernet/intel/igb/igb_main.c list_for_each(pos, &adapter->vf_macs.l) { l 7361 drivers/net/ethernet/intel/igb/igb_main.c entry = list_entry(pos, struct vf_mac_filter, l); l 7385 drivers/net/ethernet/intel/igb/igb_main.c list_for_each(pos, &adapter->vf_macs.l) { l 7386 drivers/net/ethernet/intel/igb/igb_main.c entry = list_entry(pos, struct vf_mac_filter, l); l 173 drivers/net/ethernet/intel/ixgb/ixgb_param.c } l; l 206 drivers/net/ethernet/intel/ixgb/ixgb_param.c for (i = 0; i < opt->arg.l.nr; i++) { l 207 drivers/net/ethernet/intel/ixgb/ixgb_param.c ent = &opt->arg.l.p[i]; l 313 drivers/net/ethernet/intel/ixgb/ixgb_param.c .arg = { .l = { .nr = ARRAY_SIZE(fc_list), l 195 drivers/net/ethernet/intel/ixgbe/ixgbe.h struct list_head l; l 40 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c INIT_LIST_HEAD(&adapter->vf_mvs.l); l 44 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c list_add(&mv_list[i].l, &adapter->vf_mvs.l); l 644 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c list_for_each(pos, &adapter->vf_mvs.l) { l 645 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c entry = list_entry(pos, struct vf_macvlans, l); l 665 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c list_for_each(pos, &adapter->vf_mvs.l) { l 666 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c entry = list_entry(pos, struct vf_macvlans, l); l 275 drivers/net/ethernet/mellanox/mlx4/en_netdev.c unsigned long l; l 278 drivers/net/ethernet/mellanox/mlx4/en_netdev.c l = (__force unsigned long)src_port | l 280 drivers/net/ethernet/mellanox/mlx4/en_netdev.c l ^= (__force unsigned long)(src_ip ^ dst_ip); l 282 drivers/net/ethernet/mellanox/mlx4/en_netdev.c bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); l 429 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c unsigned long l; l 432 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c l = (__force unsigned long)src_port | l 435 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c bucket_idx = hash_long(l, ARFS_HASH_SHIFT); l 927 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c int table_prio, l = 0; l 960 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c for (l = 0; l <= level; l++) { l 961 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c if (fdb_prio_table(esw, chain, prio, l).fdb) { l 962 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c fdb_prio_table(esw, chain, prio, l).num_rules++; l 966 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c fdb = create_next_size_table(esw, ns, table_prio, l, flags); l 968 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c l--; l 972 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c fdb_prio_table(esw, chain, prio, l).fdb = fdb; l 973 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 
fdb_prio_table(esw, chain, prio, l).num_rules = 1; l 981 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c if (l >= 0) l 982 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw_put_prio_table(esw, chain, prio, l); l 990 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c int l; l 997 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c for (l = level; l >= 0; l--) { l 998 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0) l 1001 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte); l 1002 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb); l 1003 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c fdb_prio_table(esw, chain, prio, l).fdb = NULL; l 349 drivers/net/ethernet/mellanox/mlx5/core/port.c MLX5_SET(mcia_reg, in, l, 0); l 8376 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, mcia, l, 0x00, 31, 1); l 671 drivers/net/ethernet/neterion/s2io.c int l = (j * lst_per_page) + k; l 672 drivers/net/ethernet/neterion/s2io.c if (l == tx_cfg->fifo_len) l 674 drivers/net/ethernet/neterion/s2io.c fifo->list_info[l].list_virt_addr = l 676 drivers/net/ethernet/neterion/s2io.c fifo->list_info[l].list_phy_addr = l 733 drivers/net/ethernet/neterion/s2io.c int l; l 759 drivers/net/ethernet/neterion/s2io.c for (l = 0; l < rxd_count[nic->rxd_mode]; l++) { l 760 drivers/net/ethernet/neterion/s2io.c rx_blocks->rxds[l].virt_addr = l 762 drivers/net/ethernet/neterion/s2io.c (rxd_size[nic->rxd_mode] * l); l 763 drivers/net/ethernet/neterion/s2io.c rx_blocks->rxds[l].dma_addr = l 765 drivers/net/ethernet/neterion/s2io.c (rxd_size[nic->rxd_mode] * l); l 23 drivers/net/ethernet/netronome/nfp/bpf/jit.c for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \ l 24 drivers/net/ethernet/netronome/nfp/bpf/jit.c next = list_next_entry(pos, l); \ l 25 drivers/net/ethernet/netronome/nfp/bpf/jit.c &(nfp_prog)->insns != &pos->l && \ l 26 drivers/net/ethernet/netronome/nfp/bpf/jit.c &(nfp_prog)->insns != &next->l; \ l 31 drivers/net/ethernet/netronome/nfp/bpf/jit.c for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \ l 32 drivers/net/ethernet/netronome/nfp/bpf/jit.c next = list_next_entry(pos, l), \ l 33 drivers/net/ethernet/netronome/nfp/bpf/jit.c next2 = list_next_entry(next, l); \ l 34 drivers/net/ethernet/netronome/nfp/bpf/jit.c &(nfp_prog)->insns != &pos->l && \ l 35 drivers/net/ethernet/netronome/nfp/bpf/jit.c &(nfp_prog)->insns != &next->l && \ l 36 drivers/net/ethernet/netronome/nfp/bpf/jit.c &(nfp_prog)->insns != &next2->l; \ l 44 drivers/net/ethernet/netronome/nfp/bpf/jit.c return meta->l.prev != &nfp_prog->insns; l 3554 drivers/net/ethernet/netronome/nfp/bpf/jit.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 3565 drivers/net/ethernet/netronome/nfp/bpf/jit.c if (list_is_last(&meta->l, &nfp_prog->insns)) l 3568 drivers/net/ethernet/netronome/nfp/bpf/jit.c br_idx = list_next_entry(meta, l)->off - 1; l 3838 drivers/net/ethernet/netronome/nfp/bpf/jit.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 3886 drivers/net/ethernet/netronome/nfp/bpf/jit.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 3912 drivers/net/ethernet/netronome/nfp/bpf/jit.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 4279 drivers/net/ethernet/netronome/nfp/bpf/jit.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 4368 
drivers/net/ethernet/netronome/nfp/bpf/jit.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 4490 drivers/net/ethernet/netronome/nfp/bpf/jit.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 20 drivers/net/ethernet/netronome/nfp/bpf/main.c .head_offset = offsetof(struct nfp_bpf_neutral_map, l), l 220 drivers/net/ethernet/netronome/nfp/bpf/main.h struct list_head l; l 225 drivers/net/ethernet/netronome/nfp/bpf/main.h struct rhash_head l; l 238 drivers/net/ethernet/netronome/nfp/bpf/main.h list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l) l 240 drivers/net/ethernet/netronome/nfp/bpf/main.h list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l) l 241 drivers/net/ethernet/netronome/nfp/bpf/main.h #define nfp_meta_next(meta) list_next_entry(meta, l) l 242 drivers/net/ethernet/netronome/nfp/bpf/main.h #define nfp_meta_prev(meta) list_prev_entry(meta, l) l 346 drivers/net/ethernet/netronome/nfp/bpf/main.h struct list_head l; l 63 drivers/net/ethernet/netronome/nfp/bpf/offload.c err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l, l 92 drivers/net/ethernet/netronome/nfp/bpf/offload.c &nfp_prog->map_records[i]->l, l 165 drivers/net/ethernet/netronome/nfp/bpf/offload.c list_add_tail(&meta->l, &nfp_prog->insns); l 180 drivers/net/ethernet/netronome/nfp/bpf/offload.c list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) { l 181 drivers/net/ethernet/netronome/nfp/bpf/offload.c list_del(&meta->l); l 400 drivers/net/ethernet/netronome/nfp/bpf/offload.c list_add_tail(&nfp_map->l, &bpf->map_list); l 413 drivers/net/ethernet/netronome/nfp/bpf/offload.c list_del_init(&nfp_map->l); l 674 drivers/net/ethernet/netronome/nfp/bpf/verifier.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 752 drivers/net/ethernet/netronome/nfp/bpf/verifier.c list_for_each_entry(meta, &nfp_prog->insns, l) { l 820 drivers/net/ethernet/netronome/nfp/bpf/verifier.c meta->jmp_dst = list_next_entry(meta, l); l 846 drivers/net/ethernet/netronome/nfp/bpf/verifier.c if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns)) l 854 drivers/net/ethernet/netronome/nfp/bpf/verifier.c meta = list_next_entry(meta, l); l 147 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c } l; l 246 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c for (i = 0; i < opt->arg.l.nr; i++) { l 247 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c ent = &opt->arg.l.p[i]; l 282 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c .arg = { .l = { .nr = (int)ARRAY_SIZE(speed_list), l 294 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c .arg = { .l = { .nr = (int)ARRAY_SIZE(dplx_list), l 307 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c .arg = { .l = { .nr = (int)ARRAY_SIZE(an_list), l 500 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list), l 187 drivers/net/ethernet/smsc/smc911x.h #define SMC_insl(lp, r, p, l) ioread16_rep((short*)((lp)->base + (r)), p, l*2) l 188 drivers/net/ethernet/smsc/smc911x.h #define SMC_outsl(lp, r, p, l) iowrite16_rep((short*)((lp)->base + (r)), p, l*2) l 193 drivers/net/ethernet/smsc/smc911x.h #define SMC_insl(lp, r, p, l) ioread32_rep((int*)((lp)->base + (r)), p, l) l 194 drivers/net/ethernet/smsc/smc911x.h #define SMC_outsl(lp, r, p, l) iowrite32_rep((int*)((lp)->base + (r)), p, l) l 214 drivers/net/ethernet/smsc/smc911x.h #define SMC_insl(lp, r, p, l) \ l 215 drivers/net/ethernet/smsc/smc911x.h smc_pxa_dma_insl(lp, lp->physaddr, r, lp->rxdma, p, l) l 246 drivers/net/ethernet/smsc/smc911x.h #define SMC_outsl(lp, r, p, l) \ l 247 
drivers/net/ethernet/smsc/smc911x.h smc_pxa_dma_outsl(lp, lp->physaddr, r, lp->txdma, p, l) l 692 drivers/net/ethernet/smsc/smc911x.h #define SMC_PUSH_DATA(lp, p, l) SMC_outsl( lp, TX_DATA_FIFO, p, (l) >> 2 ) l 693 drivers/net/ethernet/smsc/smc911x.h #define SMC_PULL_DATA(lp, p, l) SMC_insl ( lp, RX_DATA_FIFO, p, (l) >> 2 ) l 90 drivers/net/ethernet/smsc/smc91x.h #define SMC_insb(a, r, p, l) readsb((a) + (r), p, l) l 91 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l) l 92 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) l 93 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) l 94 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) l 95 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) l 129 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l) insl((a) + (r) - 0xa0000000, p, l) l 130 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l) outsl((a) + (r) - 0xa0000000, p, l) l 131 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) l 132 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) l 149 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) l 150 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) l 151 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) l 152 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) l 164 drivers/net/ethernet/smsc/smc91x.h static inline void mcf_insw(void *a, unsigned char *p, int l) l 167 drivers/net/ethernet/smsc/smc91x.h while (l-- > 0) l 171 drivers/net/ethernet/smsc/smc91x.h static inline void mcf_outsw(void *a, unsigned char *p, int l) l 174 drivers/net/ethernet/smsc/smc91x.h while (l-- > 0) l 180 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) l 181 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) l 193 drivers/net/ethernet/smsc/smc91x.h #define SMC_insb(a, r, p, l) ioread8_rep((a) + (r), p, l) l 194 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsb(a, r, p, l) iowrite8_rep((a) + (r), p, l) l 215 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) ioread16_rep((a) + (r), p, l) l 216 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l) iowrite16_rep((a) + (r), p, l) l 217 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l) ioread32_rep((a) + (r), p, l) l 218 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l) iowrite32_rep((a) + (r), p, l) l 296 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l) \ l 297 drivers/net/ethernet/smsc/smc91x.h smc_pxa_dma_insl(a, lp, r, dev->dma, p, l) l 365 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) \ l 366 drivers/net/ethernet/smsc/smc91x.h smc_pxa_dma_insw(a, lp, r, dev->dma, p, l) l 419 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l) BUG() l 420 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l) BUG() l 424 drivers/net/ethernet/smsc/smc91x.h #define SMC_insl(a, r, p, l) BUG() l 425 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsl(a, r, p, l) BUG() l 432 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) BUG() l 433 drivers/net/ethernet/smsc/smc91x.h #define 
SMC_outsw(a, r, p, l) BUG() l 438 drivers/net/ethernet/smsc/smc91x.h #define SMC_insw(a, r, p, l) BUG() l 439 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsw(a, r, p, l) BUG() l 447 drivers/net/ethernet/smsc/smc91x.h #define SMC_insb(a, r, p, l) BUG() l 448 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsb(a, r, p, l) BUG() l 452 drivers/net/ethernet/smsc/smc91x.h #define SMC_insb(a, r, p, l) BUG() l 453 drivers/net/ethernet/smsc/smc91x.h #define SMC_outsb(a, r, p, l) BUG() l 1059 drivers/net/ethernet/smsc/smc91x.h #define SMC_PUSH_DATA(lp, p, l) \ l 1063 drivers/net/ethernet/smsc/smc91x.h int __len = (l); \ l 1078 drivers/net/ethernet/smsc/smc91x.h SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ l 1080 drivers/net/ethernet/smsc/smc91x.h SMC_outsb(ioaddr, DATA_REG(lp), p, l); \ l 1083 drivers/net/ethernet/smsc/smc91x.h #define SMC_PULL_DATA(lp, p, l) \ l 1087 drivers/net/ethernet/smsc/smc91x.h int __len = (l); \ l 1113 drivers/net/ethernet/smsc/smc91x.h SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \ l 1115 drivers/net/ethernet/smsc/smc91x.h SMC_insb(ioaddr, DATA_REG(lp), p, l); \ l 277 drivers/net/ethernet/ti/cpsw_ethtool.c int i, l, ch; l 280 drivers/net/ethernet/ti/cpsw_ethtool.c for (l = 0; l < CPSW_STATS_COMMON_LEN; l++) l 281 drivers/net/ethernet/ti/cpsw_ethtool.c data[l] = readl(cpsw->hw_stats + l 282 drivers/net/ethernet/ti/cpsw_ethtool.c cpsw_gstrings_stats[l].stat_offset); l 286 drivers/net/ethernet/ti/cpsw_ethtool.c for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { l 289 drivers/net/ethernet/ti/cpsw_ethtool.c data[l] = *(u32 *)p; l 295 drivers/net/ethernet/ti/cpsw_ethtool.c for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { l 298 drivers/net/ethernet/ti/cpsw_ethtool.c data[l] = *(u32 *)p; l 1316 drivers/net/ethernet/via/via-velocity.h #define VELOCITY_PRT(l, p, args...) do {if (l<=msglevel) printk( p ,##args);} while (0) l 377 drivers/net/fddi/skfp/fplustm.c static void set_int(char *p, int l) l 379 drivers/net/fddi/skfp/fplustm.c p[0] = (char)(l >> 24) ; l 380 drivers/net/fddi/skfp/fplustm.c p[1] = (char)(l >> 16) ; l 381 drivers/net/fddi/skfp/fplustm.c p[2] = (char)(l >> 8) ; l 382 drivers/net/fddi/skfp/fplustm.c p[3] = (char)(l >> 0) ; l 33 drivers/net/fddi/skfp/h/types.h #define outpd(p,l) iowrite32(l,p) l 35 drivers/net/netdevsim/bpf.c struct list_head l; l 48 drivers/net/netdevsim/bpf.c struct list_head l; l 243 drivers/net/netdevsim/bpf.c list_add_tail(&state->l, &nsim_dev->bpf_bound_progs); l 277 drivers/net/netdevsim/bpf.c list_del(&state->l); l 517 drivers/net/netdevsim/bpf.c list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps); l 539 drivers/net/netdevsim/bpf.c list_del_init(&nmap->l); l 1054 drivers/net/ppp/pppoe.c loff_t l = *pos; l 1057 drivers/net/ppp/pppoe.c return l ? 
pppoe_get_idx(pn, --l) : SEQ_START_TOKEN; l 1114 drivers/net/wan/cosa.c int l = strlen(cosa->id_string)+1; l 1115 drivers/net/wan/cosa.c if (copy_to_user(string, cosa->id_string, l)) l 1117 drivers/net/wan/cosa.c return l; l 1123 drivers/net/wan/cosa.c int l = strlen(cosa->type)+1; l 1124 drivers/net/wan/cosa.c if (copy_to_user(string, cosa->type, l)) l 1126 drivers/net/wan/cosa.c return l; l 199 drivers/net/wireless/ath/carl9170/carl9170.h struct led_classdev l; l 109 drivers/net/wireless/ath/carl9170/led.c struct carl9170_led *arl = container_of(led, struct carl9170_led, l); l 133 drivers/net/wireless/ath/carl9170/led.c ar->leds[i].l.name = ar->leds[i].name; l 134 drivers/net/wireless/ath/carl9170/led.c ar->leds[i].l.brightness_set = carl9170_led_set_brightness; l 135 drivers/net/wireless/ath/carl9170/led.c ar->leds[i].l.brightness = 0; l 136 drivers/net/wireless/ath/carl9170/led.c ar->leds[i].l.default_trigger = trigger; l 139 drivers/net/wireless/ath/carl9170/led.c &ar->leds[i].l); l 156 drivers/net/wireless/ath/carl9170/led.c led_classdev_unregister(&ar->leds[i].l); l 163 drivers/net/wireless/ath/dfs_pri_detector.c struct list_head *l = &pde->pulses; l 164 drivers/net/wireless/ath/dfs_pri_detector.c if (list_empty(l)) l 166 drivers/net/wireless/ath/dfs_pri_detector.c return list_entry(l->prev, struct pulse_elem, head); l 1180 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l) l 1185 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c u32 cmd = (l ? BRCMF_C_SET_LRL : BRCMF_C_SET_SRL); l 54 drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h #define brcms_dbg(core, l, f, a...) __brcms_dbg(&(core)->dev, l, __func__, f, ##a) l 1048 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w, l 3654 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c int phy_c4, phy_c5, k, l, j, phy_c6; l 3776 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c for (l = -phy_c7; l <= phy_c7; l += phy_c7) { l 3778 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c phy_c12 = phy_c16 + l; l 4654 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c int l; l 4657 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c l = dot11lcnphytbl_rx_gain_info_2G_rev2_sz; l 4663 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c l = dot11lcnphytbl_rx_gain_info_5G_rev2_sz; l 4670 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c for (idx = 0; idx < l; idx++) l 213 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c int l, u32 sel) l 215 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c brcms_b_copyto_objmem(physhim->wlc_hw, offset, buf, l, sel); l 629 drivers/net/wireless/intel/ipw2x00/ipw2100.c int out, i, j, l; l 634 drivers/net/wireless/intel/ipw2x00/ipw2100.c for (l = 0, i = 0; i < 2; i++) { l 636 drivers/net/wireless/intel/ipw2x00/ipw2100.c for (j = 0; j < 8 && l < len; j++, l++) l 644 drivers/net/wireless/intel/ipw2x00/ipw2100.c for (l = 0, i = 0; i < 2; i++) { l 646 drivers/net/wireless/intel/ipw2x00/ipw2100.c for (j = 0; j < 8 && l < len; j++, l++) { l 223 drivers/net/wireless/intel/ipw2x00/ipw2200.c int out, i, j, l; l 228 drivers/net/wireless/intel/ipw2x00/ipw2200.c for (l = 0, i = 0; i < 2; i++) { l 230 drivers/net/wireless/intel/ipw2x00/ipw2200.c for (j = 0; j < 8 && l < len; j++, l++) l 238 drivers/net/wireless/intel/ipw2x00/ipw2200.c for (l = 0, i 
= 0; i < 2; i++) { l 240 drivers/net/wireless/intel/ipw2x00/ipw2200.c for (j = 0; j < 8 && l < len; j++, l++) { l 380 drivers/net/wireless/intel/iwlwifi/iwl-drv.c struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data; l 383 drivers/net/wireless/intel/iwlwifi/iwl-drv.c if (len < sizeof(*l) || l 384 drivers/net/wireless/intel/iwlwifi/iwl-drv.c len < sizeof(l->size) + l->size * sizeof(l->cs[0])) l 387 drivers/net/wireless/intel/iwlwifi/iwl-drv.c for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) { l 388 drivers/net/wireless/intel/iwlwifi/iwl-drv.c fwcs = &l->cs[j]; l 140 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c #define LWNG_SETVAL(f,i,s,l,d) \ l 142 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c hdr->f.status = s; hdr->f.len = l; hdr->f.data = d l 580 drivers/net/wireless/intersil/prism54/oid_mgt.c mgt_commit_list(islpci_private *priv, enum oid_num_t *l, int n) l 586 drivers/net/wireless/intersil/prism54/oid_mgt.c struct oid_t *t = &(isl_oid[l[i]]); l 587 drivers/net/wireless/intersil/prism54/oid_mgt.c void *data = priv->mib[l[i]]; l 1087 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c __le32 l; l 1092 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c ver.l = cpu_to_le32(adapter->fw_release_number); l 46 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h #define QTN_HOST_ADDR(h, l) ((((u64)h) << 32) | ((u64)l)) l 50 drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h #define QTN_HOST_ADDR(h, l) ((u32)l) l 52 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h #define QTN_HOST_ADDR(h, l) ((((u64)h) << 32) | ((u64)l)) l 56 drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h #define QTN_HOST_ADDR(h, l) ((u32)l) l 1444 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c u8 i, j, k, l, m; l 1453 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c for (l = 0; l < MAX_RF_PATH_NUM; ++l) l 1455 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c [i][j][k][m][l] l 1462 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c for (l = 0; l < MAX_RF_PATH_NUM; ++l) l 1464 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c [i][j][k][m][l] l 633 drivers/net/wireless/zydas/zd1211rw/zd_usb.c unsigned int l, k, n; l 634 drivers/net/wireless/zydas/zd1211rw/zd_usb.c for (i = 0, l = 0;; i++) { l 638 drivers/net/wireless/zydas/zd1211rw/zd_usb.c n = l+k; l 641 drivers/net/wireless/zydas/zd1211rw/zd_usb.c zd_mac_rx(zd_usb_to_hw(usb), buffer+l, k); l 644 drivers/net/wireless/zydas/zd1211rw/zd_usb.c l = (n+3) & ~3; l 2401 drivers/nvdimm/namespace_devs.c struct list_head *l, *e; l 2412 drivers/nvdimm/namespace_devs.c list_for_each_safe(l, e, &nd_mapping->labels) { l 2415 drivers/nvdimm/namespace_devs.c list_move_tail(l, &list); l 217 drivers/of/fdt.c unsigned int l, allocl; l 219 drivers/of/fdt.c pathp = fdt_get_name(blob, offset, &l); l 225 drivers/of/fdt.c allocl = ++l; l 234 drivers/of/fdt.c memcpy(fn, pathp, l); l 731 drivers/of/fdt.c unsigned long l, score = 0; l 740 drivers/of/fdt.c l = strlen(cp) + 1; l 741 drivers/of/fdt.c cp += l; l 742 drivers/of/fdt.c cplen -= l; l 912 drivers/of/fdt.c int l; l 922 drivers/of/fdt.c p = fdt_getprop(fdt, offset, "stdout-path", &l); l 924 drivers/of/fdt.c p = fdt_getprop(fdt, offset, "linux,stdout-path", &l); l 925 drivers/of/fdt.c if (!p || !l) l 931 drivers/of/fdt.c l = q - p; l 934 drivers/of/fdt.c offset = fdt_path_offset_namelen(fdt, p, l); l 936 drivers/of/fdt.c pr_warn("earlycon: stdout-path %.*s not found\n", l, p); l 1001 drivers/of/fdt.c int l; l 1008 drivers/of/fdt.c reg = 
of_get_flat_dt_prop(node, "linux,usable-memory", &l); l 1010 drivers/of/fdt.c reg = of_get_flat_dt_prop(node, "reg", &l); l 1014 drivers/of/fdt.c endp = reg + (l / sizeof(__be32)); l 1017 drivers/of/fdt.c pr_debug("memory scan node %s, reg size %d,\n", uname, l); l 1046 drivers/of/fdt.c int l; l 1059 drivers/of/fdt.c p = of_get_flat_dt_prop(node, "bootargs", &l); l 1060 drivers/of/fdt.c if (p != NULL && l > 0) l 1061 drivers/of/fdt.c strlcpy(data, p, min(l, COMMAND_LINE_SIZE)); l 1083 drivers/of/fdt.c rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l); l 1084 drivers/of/fdt.c if (rng_seed && l > 0) { l 1085 drivers/of/fdt.c add_bootloader_randomness(rng_seed, l); l 421 drivers/of/property.c size_t l; l 433 drivers/of/property.c for (i = 0; p < end; i++, p += l) { l 434 drivers/of/property.c l = strnlen(p, end - p) + 1; l 435 drivers/of/property.c if (p + l > end) l 461 drivers/of/property.c int l = 0, i = 0; l 471 drivers/of/property.c for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) { l 472 drivers/of/property.c l = strnlen(p, end - p) + 1; l 473 drivers/of/property.c if (p + l > end) l 279 drivers/parisc/dino.c DINO_PORT_IN(l, 32, 0) l 295 drivers/parisc/dino.c DINO_PORT_OUT(l, 32, 0) l 970 drivers/parisc/lba_pci.c static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \ l 973 drivers/parisc/lba_pci.c DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \ l 974 drivers/parisc/lba_pci.c t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \ l 986 drivers/parisc/lba_pci.c static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \ l 988 drivers/parisc/lba_pci.c void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \ l 989 drivers/parisc/lba_pci.c DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \ l 992 drivers/parisc/lba_pci.c lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \ l 64 drivers/parport/share.c static size_t dead_write(struct parport *p, const void *b, size_t l, int f) l 66 drivers/parport/share.c static size_t dead_read(struct parport *p, void *b, size_t l, int f) l 460 drivers/parport/share.c struct list_head *l; l 501 drivers/parport/share.c for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) { l 502 drivers/parport/share.c struct parport *p = list_entry(l, struct parport, full_list); l 507 drivers/parport/share.c list_add_tail(&tmp->full_list, l); l 693 drivers/pci/hotplug/ibmphp_core.c u16 l; l 712 drivers/pci/hotplug/ibmphp_core.c if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) && l 713 drivers/pci/hotplug/ibmphp_core.c (l != 0x0000) && (l != 0xffff)) { l 233 drivers/pci/hotplug/pciehp_hpc.c u32 l; l 239 drivers/pci/hotplug/pciehp_hpc.c found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0); l 252 drivers/pci/hotplug/pciehp_hpc.c PCI_FUNC(devfn), count, step, l); l 178 drivers/pci/probe.c u32 l = 0, sz = 0, mask; l 196 drivers/pci/probe.c pci_read_config_dword(dev, pos, &l); l 197 drivers/pci/probe.c pci_write_config_dword(dev, pos, l | mask); l 199 drivers/pci/probe.c pci_write_config_dword(dev, pos, l); l 214 drivers/pci/probe.c if (l == 0xffffffff) l 215 drivers/pci/probe.c l = 0; l 218 drivers/pci/probe.c res->flags = decode_bar(dev, l); l 221 drivers/pci/probe.c l64 = l & PCI_BASE_ADDRESS_IO_MASK; l 225 drivers/pci/probe.c l64 = l & PCI_BASE_ADDRESS_MEM_MASK; l 230 drivers/pci/probe.c if (l & PCI_ROM_ADDRESS_ENABLE) l 232 drivers/pci/probe.c l64 = l & PCI_ROM_ADDRESS_MASK; l 238 drivers/pci/probe.c pci_read_config_dword(dev, pos + 4, &l); l 241 drivers/pci/probe.c 
pci_write_config_dword(dev, pos + 4, l); l 243 drivers/pci/probe.c l64 |= ((u64)l << 32); l 272 drivers/pci/probe.c if ((sizeof(pci_bus_addr_t) < 8) && l) { l 2173 drivers/pci/probe.c static bool pci_bus_crs_vendor_id(u32 l) l 2175 drivers/pci/probe.c return (l & 0xffff) == 0x0001; l 2178 drivers/pci/probe.c static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l, l 2183 drivers/pci/probe.c if (!pci_bus_crs_vendor_id(*l)) l 2194 drivers/pci/probe.c while (pci_bus_crs_vendor_id(*l)) { l 2210 drivers/pci/probe.c if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) l 2222 drivers/pci/probe.c bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, l 2225 drivers/pci/probe.c if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) l 2229 drivers/pci/probe.c if (*l == 0xffffffff || *l == 0x00000000 || l 2230 drivers/pci/probe.c *l == 0x0000ffff || *l == 0xffff0000) l 2233 drivers/pci/probe.c if (pci_bus_crs_vendor_id(*l)) l 2234 drivers/pci/probe.c return pci_bus_wait_crs(bus, devfn, l, timeout); l 2239 drivers/pci/probe.c bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, l 2251 drivers/pci/probe.c return pci_idt_bus_quirk(bus, devfn, l, timeout); l 2254 drivers/pci/probe.c return pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout); l 2265 drivers/pci/probe.c u32 l; l 2267 drivers/pci/probe.c if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000)) l 2275 drivers/pci/probe.c dev->vendor = l & 0xffff; l 2276 drivers/pci/probe.c dev->device = (l >> 16) & 0xffff; l 5325 drivers/pci/quirks.c int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout) l 5342 drivers/pci/quirks.c found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout); l 578 drivers/pci/setup-bus.c u16 l; l 589 drivers/pci/setup-bus.c pci_read_config_word(bridge, PCI_IO_BASE, &l); l 592 drivers/pci/setup-bus.c l = ((u16) io_limit_lo << 8) | io_base_lo; l 599 drivers/pci/setup-bus.c l = 0x00f0; l 604 drivers/pci/setup-bus.c pci_write_config_word(bridge, PCI_IO_BASE, l); l 613 drivers/pci/setup-bus.c u32 l; l 619 drivers/pci/setup-bus.c l = (region.start >> 16) & 0xfff0; l 620 drivers/pci/setup-bus.c l |= region.end & 0xfff00000; l 623 drivers/pci/setup-bus.c l = 0x0000fff0; l 625 drivers/pci/setup-bus.c pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); l 632 drivers/pci/setup-bus.c u32 l, bu, lu; l 646 drivers/pci/setup-bus.c l = (region.start >> 16) & 0xfff0; l 647 drivers/pci/setup-bus.c l |= region.end & 0xfff00000; l 654 drivers/pci/setup-bus.c l = 0x0000fff0; l 656 drivers/pci/setup-bus.c pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); l 1035 drivers/pci/xen-pcifront.c int l, state; l 1036 drivers/pci/xen-pcifront.c l = snprintf(str, sizeof(str), "state-%d", i); l 1037 drivers/pci/xen-pcifront.c if (unlikely(l >= (sizeof(str) - 1))) { l 1048 drivers/pci/xen-pcifront.c l = snprintf(str, sizeof(str), "vdev-%d", i); l 1049 drivers/pci/xen-pcifront.c if (unlikely(l >= (sizeof(str) - 1))) { l 357 drivers/pcmcia/cistpl.c struct list_head *l, *n; l 360 drivers/pcmcia/cistpl.c list_for_each_safe(l, n, &s->cis_cache) { l 361 drivers/pcmcia/cistpl.c cis = list_entry(l, struct cis_cache_entry, node); l 158 drivers/perf/arm-ccn.c u64 l, h; l 465 drivers/perf/arm-ccn.c return &ccn->dt.cmp_mask[i].l; l 958 drivers/perf/arm-ccn.c u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; l 1248 drivers/perf/arm-ccn.c ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0; l 1250 drivers/perf/arm-ccn.c 
ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0; l 1252 drivers/perf/arm-ccn.c ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0; l 1254 drivers/perf/arm-ccn.c ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0; l 24 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c #define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l))) l 140 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c static void mtk_hw_bits_part(struct mtk_pin_field *pf, int *h, int *l) l 142 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c *l = 32 - pf->bitpos; l 143 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c *h = get_count_order(pf->mask) - *l; l 163 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c int nbits_l, nbits_h, h, l; l 167 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c l = (mtk_r32(hw, pf->index, pf->offset) l 172 drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c *value = (h << nbits_l) | l; l 24 drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h #define BANK_PMX(n, f, l, r, o) \ l 28 drivers/pinctrl/meson/pinctrl-meson-axg-pmx.h .last = l, \ l 140 drivers/pinctrl/meson/pinctrl-meson.h #define BANK_DS(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib, \ l 145 drivers/pinctrl/meson/pinctrl-meson.h .last = l, \ l 158 drivers/pinctrl/meson/pinctrl-meson.h #define BANK(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib) \ l 159 drivers/pinctrl/meson/pinctrl-meson.h BANK_DS(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib, 0, 0) l 619 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c u32 p, l, ret; l 622 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l); l 626 drivers/pinctrl/mvebu/pinctrl-armada-37xx.c if ((p ^ l) & (1 << bit_num)) { l 775 drivers/platform/x86/asus-wmi.c u32 l; l 794 drivers/platform/x86/asus-wmi.c if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) { l 798 drivers/platform/x86/asus-wmi.c absent = (l == 0xffffffff); l 568 drivers/platform/x86/eeepc-laptop.c u32 l; l 592 drivers/platform/x86/eeepc-laptop.c if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) { l 597 drivers/platform/x86/eeepc-laptop.c absent = (l == 0xffffffff); l 7848 drivers/platform/x86/thinkpad_acpi.c int l; l 7886 drivers/platform/x86/thinkpad_acpi.c } else if (sscanf(cmd, "level %u", &l) == 1 && l 7887 drivers/platform/x86/thinkpad_acpi.c l >= 0 && l <= TP_EC_VOLUME_MAX) { l 7888 drivers/platform/x86/thinkpad_acpi.c new_level = l; l 779 drivers/powercap/intel_rapl_common.c u32 l, h = 0; l 783 drivers/powercap/intel_rapl_common.c rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h); l 785 drivers/powercap/intel_rapl_common.c rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE; l 788 drivers/powercap/intel_rapl_common.c l &= ~PACKAGE_THERM_INT_PLN_ENABLE; l 789 drivers/powercap/intel_rapl_common.c wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); l 816 drivers/powercap/intel_rapl_common.c u32 l, h; l 825 drivers/powercap/intel_rapl_common.c rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h); l 828 drivers/powercap/intel_rapl_common.c l |= PACKAGE_THERM_INT_PLN_ENABLE; l 830 drivers/powercap/intel_rapl_common.c l &= ~PACKAGE_THERM_INT_PLN_ENABLE; l 832 drivers/powercap/intel_rapl_common.c wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); l 707 drivers/s390/block/dasd.c struct list_head *l; l 714 drivers/s390/block/dasd.c list_for_each(l, &block->ccw_queue) l 742 drivers/s390/block/dasd.c list_for_each(l, &device->ccw_queue) l 1911 drivers/s390/block/dasd.c struct list_head *l, *n; l 1920 drivers/s390/block/dasd.c list_for_each_safe(l, n, &device->ccw_queue) { l 1921 
drivers/s390/block/dasd.c cqr = list_entry(l, struct dasd_ccw_req, devlist); l 1936 drivers/s390/block/dasd.c struct list_head *l, *n; l 1940 drivers/s390/block/dasd.c list_for_each_safe(l, n, &device->ccw_queue) { l 1941 drivers/s390/block/dasd.c cqr = list_entry(l, struct dasd_ccw_req, devlist); l 1990 drivers/s390/block/dasd.c struct list_head *l, *n; l 1994 drivers/s390/block/dasd.c list_for_each_safe(l, n, final_queue) { l 1995 drivers/s390/block/dasd.c cqr = list_entry(l, struct dasd_ccw_req, devlist); l 2817 drivers/s390/block/dasd.c struct list_head *l, *n; l 2825 drivers/s390/block/dasd.c list_for_each_safe(l, n, &block->ccw_queue) { l 2826 drivers/s390/block/dasd.c cqr = list_entry(l, struct dasd_ccw_req, blocklist); l 2936 drivers/s390/block/dasd.c struct list_head *l, *n; l 2948 drivers/s390/block/dasd.c list_for_each_safe(l, n, &final_queue) { l 2949 drivers/s390/block/dasd.c cqr = list_entry(l, struct dasd_ccw_req, blocklist); l 468 drivers/s390/block/dasd_ioctl.c struct list_head *l; l 520 drivers/s390/block/dasd_ioctl.c list_for_each(l, &base->ccw_queue) l 395 drivers/s390/char/raw3270.c short l; /* Length of this structured field */ l 414 drivers/s390/char/raw3270.c char l; /* Length of this Self-Defining Parm */ l 490 drivers/s390/char/raw3270.c if (uap->uab.l == sizeof(struct raw3270_ua) && l 695 drivers/s390/char/raw3270.c struct list_head *l; l 730 drivers/s390/char/raw3270.c list_for_each(l, &raw3270_devices) { l 731 drivers/s390/char/raw3270.c tmp = list_entry(l, struct raw3270, list); l 734 drivers/s390/char/raw3270.c __list_add(&rp->list, l->prev, l); l 403 drivers/s390/char/sclp.c struct list_head *l; l 418 drivers/s390/char/sclp.c list_for_each(l, &sclp_reg_list) { l 419 drivers/s390/char/sclp.c reg = list_entry(l, struct sclp_register, list); l 475 drivers/s390/char/sclp.c struct list_head *l; l 478 drivers/s390/char/sclp.c list_for_each(l, &sclp_req_queue) { l 479 drivers/s390/char/sclp.c req = list_entry(l, struct sclp_req, list); l 585 drivers/s390/char/sclp.c struct list_head *l; l 594 drivers/s390/char/sclp.c list_for_each(l, &sclp_reg_list) { l 595 drivers/s390/char/sclp.c reg = list_entry(l, struct sclp_register, list); l 661 drivers/s390/char/sclp.c struct list_head *l; l 666 drivers/s390/char/sclp.c list_for_each(l, &sclp_reg_list) { l 667 drivers/s390/char/sclp.c t = list_entry(l, struct sclp_register, list); l 94 drivers/s390/char/sclp_tty.c struct list_head *l; l 101 drivers/s390/char/sclp_tty.c list_for_each(l, &sclp_tty_pages) l 288 drivers/s390/char/sclp_tty.c struct list_head *l; l 296 drivers/s390/char/sclp_tty.c list_for_each(l, &sclp_tty_outqueue) { l 297 drivers/s390/char/sclp_tty.c t = list_entry(l, struct sclp_buffer, list); l 617 drivers/s390/char/sclp_vt220.c struct list_head *l; l 624 drivers/s390/char/sclp_vt220.c list_for_each(l, &sclp_vt220_empty) l 637 drivers/s390/char/sclp_vt220.c struct list_head *l; l 645 drivers/s390/char/sclp_vt220.c list_for_each(l, &sclp_vt220_outqueue) { l 646 drivers/s390/char/sclp_vt220.c r = list_entry(l, struct sclp_vt220_request, list); l 884 drivers/s390/char/tape_34xx.c tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l) l 893 drivers/s390/char/tape_34xx.c list_add(&new_sbid->list, l); l 906 drivers/s390/char/tape_34xx.c struct list_head * l; l 925 drivers/s390/char/tape_34xx.c list_for_each(l, sbid_list) { l 926 drivers/s390/char/tape_34xx.c sbid = list_entry(l, struct tape_34xx_sbid, list); l 940 drivers/s390/char/tape_34xx.c tape_34xx_append_new_sbid(bid, l->prev); l 945 
drivers/s390/char/tape_34xx.c if (l == sbid_list) l 946 drivers/s390/char/tape_34xx.c tape_34xx_append_new_sbid(bid, l->prev); l 949 drivers/s390/char/tape_34xx.c list_for_each(l, sbid_list) { l 950 drivers/s390/char/tape_34xx.c sbid = list_entry(l, struct tape_34xx_sbid, list); l 968 drivers/s390/char/tape_34xx.c struct list_head * l; l 975 drivers/s390/char/tape_34xx.c list_for_each_safe(l, n, sbid_list) { l 976 drivers/s390/char/tape_34xx.c sbid = list_entry(l, struct tape_34xx_sbid, list); l 983 drivers/s390/char/tape_34xx.c list_del(l); l 1000 drivers/s390/char/tape_34xx.c struct list_head * l; l 1010 drivers/s390/char/tape_34xx.c list_for_each(l, sbid_list) { l 1011 drivers/s390/char/tape_34xx.c sbid = list_entry(l, struct tape_34xx_sbid, list); l 642 drivers/s390/char/tape_core.c struct list_head * l, *n; l 644 drivers/s390/char/tape_core.c list_for_each_safe(l, n, &device->req_queue) { l 645 drivers/s390/char/tape_core.c request = list_entry(l, struct tape_request, list); l 806 drivers/s390/char/tape_core.c struct list_head *l, *n; l 815 drivers/s390/char/tape_core.c list_for_each_safe(l, n, &device->req_queue) { l 816 drivers/s390/char/tape_core.c request = list_entry(l, struct tape_request, list); l 135 drivers/s390/cio/itcw.c #define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095)) l 199 drivers/s390/cio/vfio_ccw_cp.c unsigned long l, m; l 211 drivers/s390/cio/vfio_ccw_cp.c l = n; l 220 drivers/s390/cio/vfio_ccw_cp.c m = min(l, m); l 221 drivers/s390/cio/vfio_ccw_cp.c memcpy(to + (n - l), (void *)from, m); l 223 drivers/s390/cio/vfio_ccw_cp.c l -= m; l 224 drivers/s390/cio/vfio_ccw_cp.c if (l == 0) l 230 drivers/s390/cio/vfio_ccw_cp.c return l; l 72 drivers/s390/net/ctcm_dbug.h int l = strlen(s); l 73 drivers/s390/net/ctcm_dbug.h return (l > n) ? 
s + (l - n) : s; l 481 drivers/s390/net/ctcm_main.c int l = skb->len + LL_HEADER_LENGTH; l 483 drivers/s390/net/ctcm_main.c if (ch->collect_len + l > ch->max_bufsize - 2) { l 488 drivers/s390/net/ctcm_main.c header.length = l; l 494 drivers/s390/net/ctcm_main.c ch->collect_len += l; l 794 drivers/s390/net/lcs.c struct list_head *l, *n; l 799 drivers/s390/net/lcs.c list_for_each_safe(l, n, &card->lancmd_waiters) { l 800 drivers/s390/net/lcs.c reply = list_entry(l, struct lcs_reply, list); l 1149 drivers/s390/net/lcs.c struct list_head *l; l 1156 drivers/s390/net/lcs.c list_for_each(l, &card->ipm_list) { l 1157 drivers/s390/net/lcs.c ipm = list_entry(l, struct lcs_ipm_list, list); l 1177 drivers/s390/net/lcs.c struct list_head *l; l 1182 drivers/s390/net/lcs.c list_for_each(l, &card->ipm_list) { l 1183 drivers/s390/net/lcs.c tmp = list_entry(l, struct lcs_ipm_list, list); l 1156 drivers/s390/net/netiucv.c int l = skb->len + NETIUCV_HDRLEN; l 1159 drivers/s390/net/netiucv.c if (conn->collect_len + l > l 1167 drivers/s390/net/netiucv.c conn->collect_len += l; l 127 drivers/s390/virtio/virtio_ccw.c struct vq_info_block_legacy l; l 461 drivers/s390/virtio/virtio_ccw.c info->info_block->l.queue = 0; l 462 drivers/s390/virtio/virtio_ccw.c info->info_block->l.align = 0; l 463 drivers/s390/virtio/virtio_ccw.c info->info_block->l.index = index; l 464 drivers/s390/virtio/virtio_ccw.c info->info_block->l.num = 0; l 465 drivers/s390/virtio/virtio_ccw.c ccw->count = sizeof(info->info_block->l); l 560 drivers/s390/virtio/virtio_ccw.c info->info_block->l.queue = queue; l 561 drivers/s390/virtio/virtio_ccw.c info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN; l 562 drivers/s390/virtio/virtio_ccw.c info->info_block->l.index = i; l 563 drivers/s390/virtio/virtio_ccw.c info->info_block->l.num = info->num; l 564 drivers/s390/virtio/virtio_ccw.c ccw->count = sizeof(info->info_block->l); l 780 drivers/sbus/char/envctrl.c int l = strlen(pos) + 1; l 782 drivers/sbus/char/envctrl.c len -= l; l 783 drivers/sbus/char/envctrl.c pos += l; l 2442 drivers/scsi/advansys.c static void asc_prt_hex(char *f, uchar *s, int l) l 2449 drivers/scsi/advansys.c printk("%s: (%d bytes)\n", f, l); l 2451 drivers/scsi/advansys.c for (i = 0; i < l; i += 32) { l 2454 drivers/scsi/advansys.c if ((k = (l - i) / 4) >= 8) { l 2458 drivers/scsi/advansys.c m = (l - i) % 4; l 71 drivers/scsi/aic94xx/aic94xx_reg.h ASD_READ_OCM(u32,dword,l); l 84 drivers/scsi/aic94xx/aic94xx_reg.h ASD_WRITE_OCM(u32,dword,l); l 106 drivers/scsi/arm/queue.c struct list_head *l; l 114 drivers/scsi/arm/queue.c l = queue->free.next; l 115 drivers/scsi/arm/queue.c list_del(l); l 117 drivers/scsi/arm/queue.c q = list_entry(l, QE_t, list); l 124 drivers/scsi/arm/queue.c list_add(l, &queue->head); l 126 drivers/scsi/arm/queue.c list_add_tail(l, &queue->head); l 161 drivers/scsi/arm/queue.c struct list_head *l; l 165 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { l 166 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); l 169 drivers/scsi/arm/queue.c SCpnt = __queue_remove(queue, l); l 210 drivers/scsi/arm/queue.c struct list_head *l; l 214 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { l 215 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); l 218 drivers/scsi/arm/queue.c SCpnt = __queue_remove(queue, l); l 237 drivers/scsi/arm/queue.c struct list_head *l; l 240 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { l 241 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); l 243 drivers/scsi/arm/queue.c 
__queue_remove(queue, l); l 260 drivers/scsi/arm/queue.c struct list_head *l; l 264 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { l 265 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); l 286 drivers/scsi/arm/queue.c struct list_head *l; l 290 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { l 291 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); l 293 drivers/scsi/arm/queue.c __queue_remove(queue, l); l 127 drivers/scsi/atp870u.c unsigned long l; l 335 drivers/scsi/atp870u.c for (l = 0; l < workreq->cmd_len; l++) l 336 drivers/scsi/atp870u.c printk(KERN_DEBUG " %x",workreq->cmnd[l]); l 688 drivers/scsi/atp870u.c unsigned long l, bttl = 0; l 748 drivers/scsi/atp870u.c l = scsi_bufflen(workreq); l 757 drivers/scsi/atp870u.c if (l > 8) l 758 drivers/scsi/atp870u.c l = 8; l 761 drivers/scsi/atp870u.c l = 0; l 807 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&l))[2]); l 808 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&l))[1]); l 809 drivers/scsi/atp870u.c atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&l))[0]); l 811 drivers/scsi/atp870u.c dev->id[c][j].last_len = l; l 832 drivers/scsi/atp870u.c if (l == 0) { l 851 drivers/scsi/atp870u.c if (l) { l 856 drivers/scsi/atp870u.c l=sg_dma_len(sgpnt); l 858 drivers/scsi/atp870u.c printk("1. bttl %x, l %x\n",bttl, l); l 860 drivers/scsi/atp870u.c while (l > 0x10000) { l 864 drivers/scsi/atp870u.c l -= 0x10000; l 869 drivers/scsi/atp870u.c (((u16 *) (prd))[i + 2]) = cpu_to_le16(l); l 876 drivers/scsi/atp870u.c printk("2. bttl %x, l %x\n",bttl, l); l 6998 drivers/scsi/bfa/bfa_ioc.c u32 off, l, s, residue, fifo_sz; l 7010 drivers/scsi/bfa/bfa_ioc.c l = (n + 1) * fifo_sz - s; l 7011 drivers/scsi/bfa/bfa_ioc.c if (l > residue) l 7012 drivers/scsi/bfa/bfa_ioc.c l = residue; l 7014 drivers/scsi/bfa/bfa_ioc.c status = bfa_flash_read_start(pci_bar, offset + off, l, l 7029 drivers/scsi/bfa/bfa_ioc.c bfa_flash_read_end(pci_bar, l, &buf[off]); l 7031 drivers/scsi/bfa/bfa_ioc.c residue -= l; l 7032 drivers/scsi/bfa/bfa_ioc.c off += l; l 1449 drivers/scsi/esas2r/esas2r_main.c struct atto_vda_ae_lu *l = &ae->lu; l 1459 drivers/scsi/esas2r/esas2r_main.c l->dwevent = le32_to_cpu(l->dwevent); l 1460 drivers/scsi/esas2r/esas2r_main.c l->wphys_target_id = le16_to_cpu(l->wphys_target_id); l 1461 drivers/scsi/esas2r/esas2r_main.c l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id); l 1463 drivers/scsi/esas2r/esas2r_main.c if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id) l 1465 drivers/scsi/esas2r/esas2r_main.c l->id.tgtlun_raid.dwinterleave l 1466 drivers/scsi/esas2r/esas2r_main.c = le32_to_cpu(l->id.tgtlun_raid.dwinterleave); l 1467 drivers/scsi/esas2r/esas2r_main.c l->id.tgtlun_raid.dwblock_size l 1468 drivers/scsi/esas2r/esas2r_main.c = le32_to_cpu(l->id.tgtlun_raid.dwblock_size); l 1580 drivers/scsi/gdth.c u8 b, t, l, firsttime; l 1601 drivers/scsi/gdth.c l = nscp->device->lun; l 1608 drivers/scsi/gdth.c b = t = l = 0; l 1628 drivers/scsi/gdth.c b, t, l)); l 1631 drivers/scsi/gdth.c if (b == 0 && t == 0 && l == 0) { l 1636 drivers/scsi/gdth.c if (b == 0 && ((t == 0 && l == 1) || l 1637 drivers/scsi/gdth.c (t == 1 && l == 0))) { l 1696 drivers/scsi/gdth.c } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) { l 1698 drivers/scsi/gdth.c nscp->cmnd[0], b, t, l)); l 2143 drivers/scsi/gdth.c u8 t,l; l 2147 drivers/scsi/gdth.c l = scp->device->lun; l 2150 drivers/scsi/gdth.c scp->cmnd[0],b,t,l)); l 2197 drivers/scsi/gdth.c cmdp->u.raw64.lun = l; l 
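Aside: in the s390 entries (dasd.c, sclp.c, tape_core.c, tape_34xx.c, lcs.c, raw3270.c) and in scsi/arm/queue.c above, `l` is a `struct list_head *` cursor walked with list_for_each()/list_for_each_safe() and converted back to the containing object with list_entry(). The following is a minimal user-space sketch of that pattern, not code from any of the files listed: the macro definitions are simplified stand-ins for the kernel's versions, and `struct item`/`items` are hypothetical placeholders for types such as dasd_ccw_req, sclp_req, tape_request or QE_t.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-ins for the kernel's doubly linked list helpers. */
    struct list_head {
        struct list_head *next, *prev;
    };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    #define list_for_each(pos, head) \
        for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        struct list_head *prev = head->prev;

        new->next = head;
        new->prev = prev;
        prev->next = new;
        head->prev = new;
    }

    /* Hypothetical element type standing in for the request/queue-entry
     * structures used by the drivers referenced above. */
    struct item {
        int id;
        struct list_head list;
    };

    int main(void)
    {
        struct list_head items = LIST_HEAD_INIT(items);
        struct item a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
        struct list_head *l;

        list_add_tail(&a.list, &items);
        list_add_tail(&b.list, &items);
        list_add_tail(&c.list, &items);

        /* 'l' is only a cursor; list_entry() recovers the containing item. */
        list_for_each(l, &items) {
            struct item *it = list_entry(l, struct item, list);
            printf("item %d\n", it->id);
        }
        return 0;
    }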
2213 drivers/scsi/gdth.c cmdp->u.raw.lun = l; l 664 drivers/scsi/hpsa.c ssize_t l = 0; l 683 drivers/scsi/hpsa.c l = snprintf(buf, PAGE_SIZE, "N/A\n"); l 684 drivers/scsi/hpsa.c return l; l 691 drivers/scsi/hpsa.c l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); l 692 drivers/scsi/hpsa.c return l; l 351 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h #define h_copy_rdma(l, sa, sb, da, db) \ l 352 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db) l 315 drivers/scsi/imm.c unsigned char l; l 324 drivers/scsi/imm.c l = (r_str(base) & 0xf0) >> 4; l 326 drivers/scsi/imm.c *buffer++ = (r_str(base) & 0xf0) | l; l 780 drivers/scsi/imm.c unsigned char l = 0, h = 0; l 893 drivers/scsi/imm.c if (imm_in(dev, &l, 1)) { /* read status byte */ l 897 drivers/scsi/imm.c cmd->result = (DID_OK << 16) | (l & STATUS_MASK); l 1014 drivers/scsi/imm.c unsigned char l; l 1035 drivers/scsi/imm.c for (l = 0; (l < 3) && (status); l++) l 1036 drivers/scsi/imm.c status = imm_out(dev, &cmd[l << 1], 2); l 1056 drivers/scsi/imm.c l = r_str(ppb); l 1059 drivers/scsi/imm.c } while (!(l & 0x80) && (k)); l 1061 drivers/scsi/imm.c l &= 0xb8; l 1063 drivers/scsi/imm.c if (l != 0xb8) { l 1090 drivers/scsi/mpt3sas/mpi/mpi2.h #define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \ l 1091 drivers/scsi/mpt3sas/mpi/mpi2.h MPI2_SGE_LENGTH(l)) l 1095 drivers/scsi/mpt3sas/mpi/mpi2.h #define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ l 1096 drivers/scsi/mpt3sas/mpi/mpi2.h MPI2_SGE_SET_FLAGS_LENGTH(f, l)) l 1101 drivers/scsi/mpt3sas/mpi/mpi2.h #define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ l 1102 drivers/scsi/mpt3sas/mpi/mpi2.h MPI2_SGE_LENGTH(l)) l 1241 drivers/scsi/mpt3sas/mpi/mpi2.h #define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\ l 1242 drivers/scsi/mpt3sas/mpi/mpi2.h MPI2_IEEE32_SGE_LENGTH(l)) l 1248 drivers/scsi/mpt3sas/mpi/mpi2.h #define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ l 1249 drivers/scsi/mpt3sas/mpi/mpi2.h MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l)) l 1254 drivers/scsi/mpt3sas/mpi/mpi2.h #define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ l 1255 drivers/scsi/mpt3sas/mpi/mpi2.h MPI2_IEEE32_SGE_LENGTH(l)) l 944 drivers/scsi/ncr53c8xx.h #define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l)) l 945 drivers/scsi/ncr53c8xx.h #define SCR_MOVE_IND(l) ((0x20000000 | OPC_MOVE) | (l)) l 948 drivers/scsi/ncr53c8xx.h #define SCR_CHMOV_ABS(l) ((0x00000000) | (l)) l 949 drivers/scsi/ncr53c8xx.h #define SCR_CHMOV_IND(l) ((0x20000000) | (l)) l 857 drivers/scsi/nsp32.c u32_le l; l 890 drivers/scsi/nsp32.c l = le32_to_cpu(sgt[num-1].len); l 891 drivers/scsi/nsp32.c sgt[num-1].len = cpu_to_le32(l | SGTEND); l 151 drivers/scsi/nsp32_io.h unsigned long h,l; l 154 drivers/scsi/nsp32_io.h l = inw(base + DATA_REG_LOW); l 157 drivers/scsi/nsp32_io.h return ((h << 16) | l); l 164 drivers/scsi/nsp32_io.h unsigned long h,l; l 167 drivers/scsi/nsp32_io.h l = (val & 0x0000ffff) >> 0; l 170 drivers/scsi/nsp32_io.h outw(l, base + DATA_REG_LOW); l 694 drivers/scsi/pcmcia/nsp_cs.c unsigned int l, m, h, dummy; l 698 drivers/scsi/pcmcia/nsp_cs.c l = nsp_index_read(base, TRANSFERCOUNT); l 703 drivers/scsi/pcmcia/nsp_cs.c count = (h << 16) | (m << 8) | (l << 0); l 674 drivers/scsi/ppa.c unsigned char l = 0, h = 0; l 774 drivers/scsi/ppa.c if (ppa_in(dev, &l, 1)) { /* read status byte */ l 779 drivers/scsi/ppa.c (DID_OK << 16) + (h << 8) + (l & STATUS_MASK); l 888 drivers/scsi/ppa.c unsigned char l; 
l 909 drivers/scsi/ppa.c for (l = 0; (l < 6) && (status); l++) l 931 drivers/scsi/ppa.c l = r_str(ppb); l 934 drivers/scsi/ppa.c } while (!(l & 0x80) && (k)); l 936 drivers/scsi/ppa.c l &= 0xf0; l 938 drivers/scsi/ppa.c if (l != 0xf0) { l 2955 drivers/scsi/qla2xxx/qla_attr.c dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, l 2958 drivers/scsi/qla2xxx/qla_attr.c vha->gnl.l = NULL; l 2960 drivers/scsi/qla2xxx/qla_attr.c vfree(vha->scan.l); l 277 drivers/scsi/qla2xxx/qla_def.h struct get_name_list_extended *l; l 3057 drivers/scsi/qla2xxx/qla_def.h struct fab_scan_rp *l; l 3597 drivers/scsi/qla2xxx/qla_gs.c rp = &vha->scan.l[i]; l 3606 drivers/scsi/qla2xxx/qla_gs.c trp = &vha->scan.l[k]; l 3794 drivers/scsi/qla2xxx/qla_gs.c rp = &vha->scan.l[j]; l 3801 drivers/scsi/qla2xxx/qla_gs.c rp = &vha->scan.l[k]; l 3815 drivers/scsi/qla2xxx/qla_gs.c rp = &vha->scan.l[k]; l 3830 drivers/scsi/qla2xxx/qla_gs.c rp = &vha->scan.l[k]; l 3845 drivers/scsi/qla2xxx/qla_gs.c rp = &vha->scan.l[k]; l 4138 drivers/scsi/qla2xxx/qla_gs.c memset(vha->scan.l, 0, vha->scan.size); l 731 drivers/scsi/qla2xxx/qla_init.c e = &vha->gnl.l[i]; l 894 drivers/scsi/qla2xxx/qla_init.c e = &vha->gnl.l[i]; l 998 drivers/scsi/qla2xxx/qla_init.c e = &vha->gnl.l[i]; l 1035 drivers/scsi/qla2xxx/qla_init.c e = &vha->gnl.l[i]; l 1520 drivers/scsi/qla2xxx/qla_mbx.c qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) l 1568 drivers/scsi/qla2xxx/qla_mbx.c qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) l 1586 drivers/scsi/qla2xxx/qla_mbx.c mcp->mb[2] = (u32)l; l 1599 drivers/scsi/qla2xxx/qla_mbx.c rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l, l 3194 drivers/scsi/qla2xxx/qla_mbx.c uint64_t l, int tag) l 3236 drivers/scsi/qla2xxx/qla_mbx.c int_to_scsilun(l, &tsk->p.tsk.lun); l 3272 drivers/scsi/qla2xxx/qla_mbx.c rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, l 3288 drivers/scsi/qla2xxx/qla_mbx.c qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag) l 3293 drivers/scsi/qla2xxx/qla_mbx.c return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); l 3295 drivers/scsi/qla2xxx/qla_mbx.c return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); l 3299 drivers/scsi/qla2xxx/qla_mbx.c qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag) l 3304 drivers/scsi/qla2xxx/qla_mbx.c return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); l 3306 drivers/scsi/qla2xxx/qla_mbx.c return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); l 732 drivers/scsi/qla2xxx/qla_mr.c qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag) l 734 drivers/scsi/qla2xxx/qla_mr.c return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); l 738 drivers/scsi/qla2xxx/qla_mr.c qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag) l 740 drivers/scsi/qla2xxx/qla_mr.c return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); l 1321 drivers/scsi/qla2xxx/qla_os.c uint64_t l, enum nexus_wait_type type) l 1354 drivers/scsi/qla2xxx/qla_os.c cmd->device->lun == l); l 3452 drivers/scsi/qla2xxx/qla_os.c if (base_vha->gnl.l) { l 3454 drivers/scsi/qla2xxx/qla_os.c base_vha->gnl.l, base_vha->gnl.ldma); l 3455 drivers/scsi/qla2xxx/qla_os.c base_vha->gnl.l = NULL; l 3694 drivers/scsi/qla2xxx/qla_os.c base_vha->gnl.l, base_vha->gnl.ldma); l 3695 drivers/scsi/qla2xxx/qla_os.c base_vha->gnl.l = NULL; l 3731 drivers/scsi/qla2xxx/qla_os.c base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); l 3733 drivers/scsi/qla2xxx/qla_os.c base_vha->gnl.l = NULL; l 3735 
drivers/scsi/qla2xxx/qla_os.c vfree(base_vha->scan.l); l 4822 drivers/scsi/qla2xxx/qla_os.c vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev, l 4824 drivers/scsi/qla2xxx/qla_os.c if (!vha->gnl.l) { l 4833 drivers/scsi/qla2xxx/qla_os.c vha->scan.l = vmalloc(vha->scan.size); l 4834 drivers/scsi/qla2xxx/qla_os.c if (!vha->scan.l) { l 4838 drivers/scsi/qla2xxx/qla_os.c vha->gnl.l, vha->gnl.ldma); l 4839 drivers/scsi/qla2xxx/qla_os.c vha->gnl.l = NULL; l 4576 drivers/scsi/scsi_debug.c int f, j, l; l 4608 drivers/scsi/scsi_debug.c l = find_last_bit(sqp->in_use_bm, sdebug_max_queue); l 4610 drivers/scsi/scsi_debug.c "first,last bits", f, l); l 805 drivers/scsi/scsi_transport_spi.c int l, result; l 826 drivers/scsi/scsi_transport_spi.c for (l = 0; ; l++) { l 831 drivers/scsi/scsi_transport_spi.c if(l >= 3) l 4606 drivers/scsi/st.c ssize_t l = 0; l 4608 drivers/scsi/st.c l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined); l 4609 drivers/scsi/st.c return l; l 4618 drivers/scsi/st.c ssize_t l = 0; l 4620 drivers/scsi/st.c l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize); l 4621 drivers/scsi/st.c return l; l 4630 drivers/scsi/st.c ssize_t l = 0; l 4634 drivers/scsi/st.c l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density); l 4635 drivers/scsi/st.c return l; l 4644 drivers/scsi/st.c ssize_t l = 0; l 4646 drivers/scsi/st.c l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1); l 4647 drivers/scsi/st.c return l; l 4657 drivers/scsi/st.c ssize_t l = 0; l 4675 drivers/scsi/st.c l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); l 4676 drivers/scsi/st.c return l; l 413 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_MOVE_ABS(l) ((0x00000000 | OPC_MOVE) | (l)) l 417 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_CHMOV_ABS(l) ((0x00000000) | (l)) l 426 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_TCHMOVE_ABS(l) ((0x20000000 | OPC_TCHMOVE) | (l)) l 429 drivers/scsi/sym53c8xx_2/sym_defs.h #define SCR_TMOV_ABS(l) ((0x20000000) | (l)) l 897 drivers/scsi/sym53c8xx_2/sym_glue.c int t, l; l 953 drivers/scsi/sym53c8xx_2/sym_glue.c for (l = 0; l < SYM_CONF_MAX_LUN; l++) l 954 drivers/scsi/sym53c8xx_2/sym_glue.c sym_tune_dev_queuing(tp, l, uc->data); l 962 drivers/scsi/sym53c8xx_2/sym_glue.c for (l = 0; l < SYM_CONF_MAX_LUN; l++) { l 963 drivers/scsi/sym53c8xx_2/sym_glue.c struct sym_lcb *lp = sym_lp(tp, l); l 1179 drivers/scsi/sym53c8xx_2/sym_hipd.h #define _sym_calloc_dma(np, l, n) __sym_calloc_dma(np->bus_dmat, l, n) l 1180 drivers/scsi/sym53c8xx_2/sym_hipd.h #define _sym_mfree_dma(np, p, l, n) \ l 1181 drivers/scsi/sym53c8xx_2/sym_hipd.h __sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n) l 1182 drivers/scsi/sym53c8xx_2/sym_hipd.h #define sym_calloc_dma(l, n) _sym_calloc_dma(np, l, n) l 1183 drivers/scsi/sym53c8xx_2/sym_hipd.h #define sym_mfree_dma(p, l, n) _sym_mfree_dma(np, p, l, n) l 64 drivers/slimbus/qcom-ctrl.c #define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \ l 65 drivers/slimbus/qcom-ctrl.c ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16)) l 91 drivers/slimbus/qcom-ngd-ctrl.c #define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \ l 92 drivers/slimbus/qcom-ngd-ctrl.c ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16)) l 430 drivers/slimbus/slimbus.h void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 l); l 179 drivers/soundwire/intel.c static ssize_t intel_sprintf(void __iomem *mem, bool l, l 184 drivers/soundwire/intel.c if (l) l 204 drivers/spi/spi-omap2-mcspi.c u32 l, rw; l 206 drivers/spi/spi-omap2-mcspi.c l = 
mcspi_cached_chconf0(spi); l 214 drivers/spi/spi-omap2-mcspi.c l |= rw; l 216 drivers/spi/spi-omap2-mcspi.c l &= ~rw; l 218 drivers/spi/spi-omap2-mcspi.c mcspi_write_chconf0(spi, l); l 224 drivers/spi/spi-omap2-mcspi.c u32 l; l 226 drivers/spi/spi-omap2-mcspi.c l = cs->chctrl0; l 228 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_CHCTRL_EN; l 230 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCTRL_EN; l 231 drivers/spi/spi-omap2-mcspi.c cs->chctrl0 = l; l 240 drivers/spi/spi-omap2-mcspi.c u32 l; l 257 drivers/spi/spi-omap2-mcspi.c l = mcspi_cached_chconf0(spi); l 260 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_FORCE; l 262 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_CHCONF_FORCE; l 264 drivers/spi/spi-omap2-mcspi.c mcspi_write_chconf0(spi, l); l 275 drivers/spi/spi-omap2-mcspi.c u32 l; l 280 drivers/spi/spi-omap2-mcspi.c l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL); l 281 drivers/spi/spi-omap2-mcspi.c l &= ~(OMAP2_MCSPI_MODULCTRL_STEST); l 283 drivers/spi/spi-omap2-mcspi.c l |= (OMAP2_MCSPI_MODULCTRL_MS); l 285 drivers/spi/spi-omap2-mcspi.c l &= ~(OMAP2_MCSPI_MODULCTRL_MS); l 286 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_MODULCTRL_SINGLE; l 288 drivers/spi/spi-omap2-mcspi.c mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l); l 290 drivers/spi/spi-omap2-mcspi.c ctx->modulctrl = l; l 437 drivers/spi/spi-omap2-mcspi.c u32 l; l 456 drivers/spi/spi-omap2-mcspi.c l = mcspi_cached_chconf0(spi); l 474 drivers/spi/spi-omap2-mcspi.c if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0) l 540 drivers/spi/spi-omap2-mcspi.c if (l & OMAP2_MCSPI_CHCONF_TURBO) { l 694 drivers/spi/spi-omap2-mcspi.c u32 l; l 705 drivers/spi/spi-omap2-mcspi.c l = mcspi_cached_chconf0(spi); l 743 drivers/spi/spi-omap2-mcspi.c (l & OMAP2_MCSPI_CHCONF_TURBO)) { l 790 drivers/spi/spi-omap2-mcspi.c (l & OMAP2_MCSPI_CHCONF_TURBO)) { l 837 drivers/spi/spi-omap2-mcspi.c (l & OMAP2_MCSPI_CHCONF_TURBO)) { l 897 drivers/spi/spi-omap2-mcspi.c u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0; l 924 drivers/spi/spi-omap2-mcspi.c l = mcspi_cached_chconf0(spi); l 930 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_IS; l 931 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_DPE1; l 932 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_CHCONF_DPE0; l 934 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_CHCONF_IS; l 935 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_CHCONF_DPE1; l 936 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_DPE0; l 940 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_WL_MASK; l 941 drivers/spi/spi-omap2-mcspi.c l |= (word_len - 1) << 7; l 945 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */ l 947 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_EPOL; l 950 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK; l 951 drivers/spi/spi-omap2-mcspi.c l |= clkd << 2; l 954 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_CLKG; l 955 drivers/spi/spi-omap2-mcspi.c l |= clkg; l 964 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_CHCONF_POL; l 966 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_POL; l 968 drivers/spi/spi-omap2-mcspi.c l |= OMAP2_MCSPI_CHCONF_PHA; l 970 drivers/spi/spi-omap2-mcspi.c l &= ~OMAP2_MCSPI_CHCONF_PHA; l 972 drivers/spi/spi-omap2-mcspi.c mcspi_write_chconf0(spi, l); l 217 drivers/spi/spi-pic32.c BUILD_SPI_FIFO_RW(dword, u32, l); l 936 drivers/spi/spi-sh-msiof.c unsigned int l = 0; l 939 drivers/spi/spi-sh-msiof.c l = min(round_down(len, 4), p->tx_fifo_size * 4); l 941 
drivers/spi/spi-sh-msiof.c l = min(round_down(len, 4), p->rx_fifo_size * 4); l 952 drivers/spi/spi-sh-msiof.c copy32(p->tx_dma_page, tx_buf, l / 4); l 954 drivers/spi/spi-sh-msiof.c ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l); l 964 drivers/spi/spi-sh-msiof.c copy32(rx_buf, p->rx_dma_page, l / 4); l 965 drivers/spi/spi-sh-msiof.c rx_buf += l; l 968 drivers/spi/spi-sh-msiof.c tx_buf += l; l 970 drivers/spi/spi-sh-msiof.c len -= l; l 212 drivers/staging/comedi/comedi_buf.c int l = min_t(int, len - done, PAGE_SIZE - pgoff); l 216 drivers/staging/comedi/comedi_buf.c memcpy(b, buf, l); l 218 drivers/staging/comedi/comedi_buf.c memcpy(buf, b, l); l 219 drivers/staging/comedi/comedi_buf.c buf += l; l 220 drivers/staging/comedi/comedi_buf.c done += l; l 94 drivers/staging/comedi/drivers/jr3_pci.c struct comedi_lrange l; l 527 drivers/staging/comedi/drivers/jr3_pci.c r[0].l.range[0].min = -get_s16(&fs->fx) * 1000; l 528 drivers/staging/comedi/drivers/jr3_pci.c r[0].l.range[0].max = get_s16(&fs->fx) * 1000; l 529 drivers/staging/comedi/drivers/jr3_pci.c r[1].l.range[0].min = -get_s16(&fs->fy) * 1000; l 530 drivers/staging/comedi/drivers/jr3_pci.c r[1].l.range[0].max = get_s16(&fs->fy) * 1000; l 531 drivers/staging/comedi/drivers/jr3_pci.c r[2].l.range[0].min = -get_s16(&fs->fz) * 1000; l 532 drivers/staging/comedi/drivers/jr3_pci.c r[2].l.range[0].max = get_s16(&fs->fz) * 1000; l 533 drivers/staging/comedi/drivers/jr3_pci.c r[3].l.range[0].min = -get_s16(&fs->mx) * 100; l 534 drivers/staging/comedi/drivers/jr3_pci.c r[3].l.range[0].max = get_s16(&fs->mx) * 100; l 535 drivers/staging/comedi/drivers/jr3_pci.c r[4].l.range[0].min = -get_s16(&fs->my) * 100; l 536 drivers/staging/comedi/drivers/jr3_pci.c r[4].l.range[0].max = get_s16(&fs->my) * 100; l 537 drivers/staging/comedi/drivers/jr3_pci.c r[5].l.range[0].min = -get_s16(&fs->mz) * 100; l 539 drivers/staging/comedi/drivers/jr3_pci.c r[5].l.range[0].max = get_s16(&fs->mz) * 100; l 540 drivers/staging/comedi/drivers/jr3_pci.c r[6].l.range[0].min = -get_s16(&fs->v1) * 100; l 541 drivers/staging/comedi/drivers/jr3_pci.c r[6].l.range[0].max = get_s16(&fs->v1) * 100; l 542 drivers/staging/comedi/drivers/jr3_pci.c r[7].l.range[0].min = -get_s16(&fs->v2) * 100; l 543 drivers/staging/comedi/drivers/jr3_pci.c r[7].l.range[0].max = get_s16(&fs->v2) * 100; l 544 drivers/staging/comedi/drivers/jr3_pci.c r[8].l.range[0].min = 0; l 545 drivers/staging/comedi/drivers/jr3_pci.c r[8].l.range[0].max = 65535; l 636 drivers/staging/comedi/drivers/jr3_pci.c spriv->range[j].l.length = 1; l 637 drivers/staging/comedi/drivers/jr3_pci.c spriv->range[j].l.range[0].min = -1000000; l 638 drivers/staging/comedi/drivers/jr3_pci.c spriv->range[j].l.range[0].max = 1000000; l 641 drivers/staging/comedi/drivers/jr3_pci.c spriv->range_table_list[j + k * 8] = &spriv->range[j].l; l 645 drivers/staging/comedi/drivers/jr3_pci.c spriv->range[8].l.length = 1; l 646 drivers/staging/comedi/drivers/jr3_pci.c spriv->range[8].l.range[0].min = 0; l 647 drivers/staging/comedi/drivers/jr3_pci.c spriv->range[8].l.range[0].max = 65535; l 649 drivers/staging/comedi/drivers/jr3_pci.c spriv->range_table_list[56] = &spriv->range[8].l; l 650 drivers/staging/comedi/drivers/jr3_pci.c spriv->range_table_list[57] = &spriv->range[8].l; l 141 drivers/staging/fwserial/dma_fifo.c int ofs, l; l 154 drivers/staging/fwserial/dma_fifo.c l = min(n, fifo->capacity - ofs); l 155 drivers/staging/fwserial/dma_fifo.c memcpy(fifo->data + ofs, src, l); l 156 drivers/staging/fwserial/dma_fifo.c memcpy(fifo->data, src + l, n 
- l); l 184 drivers/staging/fwserial/dma_fifo.c unsigned int len, n, ofs, l, limit; l 203 drivers/staging/fwserial/dma_fifo.c l = fifo->capacity - ofs; l 204 drivers/staging/fwserial/dma_fifo.c limit = min_t(unsigned int, l, fifo->tx_limit); l 209 drivers/staging/fwserial/dma_fifo.c fifo->out += l; l 176 drivers/staging/isdn/avm/b1pcmcia.c struct list_head *l; l 179 drivers/staging/isdn/avm/b1pcmcia.c list_for_each(l, &cards) { l 180 drivers/staging/isdn/avm/b1pcmcia.c card = list_entry(l, avmcard, list); l 677 drivers/staging/isdn/gigaset/bas-gigaset.c unsigned l; l 717 drivers/staging/isdn/gigaset/bas-gigaset.c l = (unsigned) ucs->int_in_buf[1] + l 721 drivers/staging/isdn/gigaset/bas-gigaset.c urb->actual_length, (int)ucs->int_in_buf[0], l, l 770 drivers/staging/isdn/gigaset/bas-gigaset.c atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES, l 774 drivers/staging/isdn/gigaset/bas-gigaset.c channel, bcs->hw.bas->numsub, l, l 779 drivers/staging/isdn/gigaset/bas-gigaset.c if (!l) { l 789 drivers/staging/isdn/gigaset/bas-gigaset.c l, ucs->rcvbuf_size); l 800 drivers/staging/isdn/gigaset/bas-gigaset.c ucs->rcvbuf = kmalloc(l, GFP_ATOMIC); l 806 drivers/staging/isdn/gigaset/bas-gigaset.c ucs->rcvbuf_size = l; l 831 drivers/staging/isdn/gigaset/bas-gigaset.c (int) ucs->int_in_buf[0], l); l 163 drivers/staging/isdn/gigaset/capi.c int l = 0; l 165 drivers/staging/isdn/gigaset/capi.c if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen) l 167 drivers/staging/isdn/gigaset/capi.c out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]); l 170 drivers/staging/isdn/gigaset/capi.c out[0] = l; l 171 drivers/staging/isdn/gigaset/capi.c return l; l 233 drivers/staging/isdn/gigaset/capi.c int i, l; l 238 drivers/staging/isdn/gigaset/capi.c l = CAPIMSG_LEN(data); l 239 drivers/staging/isdn/gigaset/capi.c if (l < 12) { l 240 drivers/staging/isdn/gigaset/capi.c gig_dbg(level, "%s: ??? 
LEN=%04d", tag, l); l 245 drivers/staging/isdn/gigaset/capi.c CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l, l 247 drivers/staging/isdn/gigaset/capi.c l -= 12; l 248 drivers/staging/isdn/gigaset/capi.c if (l <= 0) l 250 drivers/staging/isdn/gigaset/capi.c if (l > 64) l 251 drivers/staging/isdn/gigaset/capi.c l = 64; /* arbitrary limit */ l 252 drivers/staging/isdn/gigaset/capi.c dbgline = kmalloc_array(3, l, GFP_ATOMIC); l 255 drivers/staging/isdn/gigaset/capi.c for (i = 0; i < l; i++) { l 260 drivers/staging/isdn/gigaset/capi.c dbgline[3 * l - 1] = '\0'; l 266 drivers/staging/isdn/gigaset/capi.c l = CAPIMSG_DATALEN(data); l 267 drivers/staging/isdn/gigaset/capi.c gig_dbg(level, " DataLength=%d", l); l 268 drivers/staging/isdn/gigaset/capi.c if (l <= 0 || !(gigaset_debuglevel & DEBUG_LLDATA)) l 270 drivers/staging/isdn/gigaset/capi.c if (l > 64) l 271 drivers/staging/isdn/gigaset/capi.c l = 64; /* arbitrary limit */ l 272 drivers/staging/isdn/gigaset/capi.c dbgline = kmalloc_array(3, l, GFP_ATOMIC); l 276 drivers/staging/isdn/gigaset/capi.c for (i = 0; i < l; i++) { l 281 drivers/staging/isdn/gigaset/capi.c dbgline[3 * l - 1] = '\0'; l 1336 drivers/staging/isdn/gigaset/capi.c int i, l, lbc, lhlc; l 1382 drivers/staging/isdn/gigaset/capi.c l = *pp++; l 1393 drivers/staging/isdn/gigaset/capi.c l--; l 1395 drivers/staging/isdn/gigaset/capi.c if (l >= 2 && pp[0] == '*' && pp[1] == '*') { l 1398 drivers/staging/isdn/gigaset/capi.c l -= 2; l 1405 drivers/staging/isdn/gigaset/capi.c commands[AT_DIAL] = kmalloc(l + 3, GFP_KERNEL); l 1408 drivers/staging/isdn/gigaset/capi.c snprintf(commands[AT_DIAL], l + 3, "D%.*s\r", l, pp); l 1413 drivers/staging/isdn/gigaset/capi.c l = *pp++; l 1427 drivers/staging/isdn/gigaset/capi.c l--; l 1430 drivers/staging/isdn/gigaset/capi.c if (!l) { l 1454 drivers/staging/isdn/gigaset/capi.c l--; l 1456 drivers/staging/isdn/gigaset/capi.c if (l) { l 1458 drivers/staging/isdn/gigaset/capi.c commands[AT_MSN] = kmalloc(l + 8, GFP_KERNEL); l 1461 drivers/staging/isdn/gigaset/capi.c snprintf(commands[AT_MSN], l + 8, "^SMSN=%*s\r", l, pp); l 1496 drivers/staging/isdn/gigaset/capi.c l = lbc + 7; /* "^SBC=" + value + "\r" + null byte */ l 1498 drivers/staging/isdn/gigaset/capi.c l += lhlc + 7; /* ";^SHLC=" + value */ l 1499 drivers/staging/isdn/gigaset/capi.c commands[AT_BC] = kmalloc(l, GFP_KERNEL); l 1518 drivers/staging/isdn/gigaset/capi.c strcpy(commands[AT_BC] + l - 2, "\r"); l 88 drivers/staging/isdn/hysdn/hysdn_boot.c long l; l 116 drivers/staging/isdn/hysdn/hysdn_boot.c l = POF_BOOT_LOADER_OFF_IN_PAGE - l 118 drivers/staging/isdn/hysdn/hysdn_boot.c if (l > 0) { l 120 drivers/staging/isdn/hysdn/hysdn_boot.c imgp += l; /* advance pointer */ l 121 drivers/staging/isdn/hysdn/hysdn_boot.c img_len -= l; /* adjust len */ l 13 drivers/staging/media/sunxi/cedrus/cedrus_regs.h #define SHIFT_AND_MASK_BITS(v, h, l) \ l 14 drivers/staging/media/sunxi/cedrus/cedrus_regs.h (((unsigned long)(v) << (l)) & GENMASK(h, l)) l 52 drivers/staging/media/sunxi/cedrus/cedrus_regs.h #define VE_CHROMA_BUF_LEN_SDRT(l) SHIFT_AND_MASK_BITS(l, 27, 0) l 1920 drivers/staging/rtl8723bs/hal/hal_com_phycfg.c u8 i, j, k, l, m; l 1928 drivers/staging/rtl8723bs/hal/hal_com_phycfg.c for (l = 0; l < MAX_RF_PATH_NUM; ++l) l 1929 drivers/staging/rtl8723bs/hal/hal_com_phycfg.c pHalData->TxPwrLimit_2_4G[i][j][k][m][l] = MAX_POWER_INDEX; l 1936 drivers/staging/rtl8723bs/hal/hal_com_phycfg.c for (l = 0; l < MAX_RF_PATH_NUM; ++l) l 1937 drivers/staging/rtl8723bs/hal/hal_com_phycfg.c 
pHalData->TxPwrLimit_5G[i][j][k][m][l] = MAX_POWER_INDEX; l 150 drivers/staging/wlan-ng/p80211types.h #define P80211DID_MK(a, m, l) ((((u32)(a)) & (m)) << (l)) l 175 drivers/staging/wlan-ng/p80211types.h #define P80211DID_GET(a, m, l) ((((u32)(a)) >> (l)) & (m)) l 52 drivers/staging/wusbcore/host/whci/whci-hc.h #define QTD_STS_LEN(l) ((l) << 0) /* transfer length */ l 151 drivers/staging/wusbcore/host/whci/whci-hc.h #define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */ l 177 drivers/thermal/intel/x86_pkg_temp_thermal.c u32 l, h, mask, shift, intr; l 184 drivers/thermal/intel/x86_pkg_temp_thermal.c &l, &h); l 197 drivers/thermal/intel/x86_pkg_temp_thermal.c l &= ~mask; l 203 drivers/thermal/intel/x86_pkg_temp_thermal.c l &= ~intr; l 205 drivers/thermal/intel/x86_pkg_temp_thermal.c l |= (zonedev->tj_max - temp)/1000 << shift; l 206 drivers/thermal/intel/x86_pkg_temp_thermal.c l |= intr; l 210 drivers/thermal/intel/x86_pkg_temp_thermal.c l, h); l 237 drivers/thermal/intel/x86_pkg_temp_thermal.c u32 l, h; l 239 drivers/thermal/intel/x86_pkg_temp_thermal.c rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); l 241 drivers/thermal/intel/x86_pkg_temp_thermal.c thres_0 = (l & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0; l 242 drivers/thermal/intel/x86_pkg_temp_thermal.c thres_1 = (l & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1; l 244 drivers/thermal/intel/x86_pkg_temp_thermal.c l |= THERM_INT_THRESHOLD0_ENABLE; l 246 drivers/thermal/intel/x86_pkg_temp_thermal.c l |= THERM_INT_THRESHOLD1_ENABLE; l 247 drivers/thermal/intel/x86_pkg_temp_thermal.c wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); l 253 drivers/thermal/intel/x86_pkg_temp_thermal.c u32 l, h; l 255 drivers/thermal/intel/x86_pkg_temp_thermal.c rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); l 257 drivers/thermal/intel/x86_pkg_temp_thermal.c l &= ~(THERM_INT_THRESHOLD0_ENABLE | THERM_INT_THRESHOLD1_ENABLE); l 258 drivers/thermal/intel/x86_pkg_temp_thermal.c wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); l 202 drivers/tty/hvc/hvsi_lib.c unsigned int l = min(count, (int)pv->inbuf_pktlen); l 203 drivers/tty/hvc/hvsi_lib.c memcpy(&buf[read], &pv->inbuf[pv->inbuf_cur], l); l 204 drivers/tty/hvc/hvsi_lib.c pv->inbuf_cur += l; l 205 drivers/tty/hvc/hvsi_lib.c pv->inbuf_pktlen -= l; l 206 drivers/tty/hvc/hvsi_lib.c count -= l; l 207 drivers/tty/hvc/hvsi_lib.c read += l; l 111 drivers/tty/serial/8250/8250_core.c struct list_head *l, *end = NULL; l 118 drivers/tty/serial/8250/8250_core.c l = i->head; l 123 drivers/tty/serial/8250/8250_core.c up = list_entry(l, struct uart_8250_port, list); l 130 drivers/tty/serial/8250/8250_core.c end = l; l 132 drivers/tty/serial/8250/8250_core.c l = l->next; l 134 drivers/tty/serial/8250/8250_core.c if (l == i->head && pass_counter++ > PASS_LIMIT) l 136 drivers/tty/serial/8250/8250_core.c } while (l != end); l 561 drivers/tty/serial/arc_uart.c unsigned int l, h, hw_val; l 567 drivers/tty/serial/arc_uart.c l = hw_val & 0xFF; l 570 drivers/tty/serial/arc_uart.c UART_SET_BAUDL(port, l); l 87 drivers/tty/vcc.c #define vccdbgl(l) \ l 90 drivers/tty/vcc.c ldc_print(l); \ l 562 drivers/tty/vt/consolemap.c u16 **p1, *p2, l; l 582 drivers/tty/vt/consolemap.c l = 0; /* unicode value */ l 589 drivers/tty/vt/consolemap.c for (k = 0; k < 64; k++, l++) l 595 drivers/tty/vt/consolemap.c err1 = con_insert_unipair(q, l, p2[k]); l 607 drivers/tty/vt/consolemap.c l += 64; l 612 drivers/tty/vt/consolemap.c l += 32 * 64; l 730 drivers/usb/atm/cxacru.c int l = le32_to_cpu(buf[offb++]); l 732 drivers/usb/atm/cxacru.c 
if (l < 0 || l > stride || l > (len - offb) / 2) { l 735 drivers/usb/atm/cxacru.c cm, l); l 739 drivers/usb/atm/cxacru.c while (l--) { l 953 drivers/usb/atm/cxacru.c int l = min_t(int, stride, size - offd); l 956 drivers/usb/atm/cxacru.c buf[offb++] = l; l 961 drivers/usb/atm/cxacru.c addr += l; l 962 drivers/usb/atm/cxacru.c if (l) l 963 drivers/usb/atm/cxacru.c memcpy(buf + offb, data + offd, l); l 964 drivers/usb/atm/cxacru.c if (l < stride) l 965 drivers/usb/atm/cxacru.c memset(buf + offb + l, 0, stride - l); l 793 drivers/usb/atm/ueagle-atm.c u64 l; l 805 drivers/usb/atm/ueagle-atm.c l = E4_PAGE_BYTES(blockidx->PageSize); l 806 drivers/usb/atm/ueagle-atm.c sum += l; l 807 drivers/usb/atm/ueagle-atm.c l += le32_to_cpu(blockidx->PageOffset); l 808 drivers/usb/atm/ueagle-atm.c if (l > len) l 809 drivers/usb/gadget/configfs.c int res, l; l 811 drivers/usb/gadget/configfs.c l = min((int)len, OS_STRING_QW_SIGN_LEN >> 1); l 812 drivers/usb/gadget/configfs.c if (page[l - 1] == '\n') l 813 drivers/usb/gadget/configfs.c --l; l 816 drivers/usb/gadget/configfs.c res = utf8s_to_utf16s(page, l, l 1102 drivers/usb/gadget/configfs.c int l; l 1104 drivers/usb/gadget/configfs.c l = min_t(int, 8, len); l 1105 drivers/usb/gadget/configfs.c if (page[l - 1] == '\n') l 1106 drivers/usb/gadget/configfs.c --l; l 1109 drivers/usb/gadget/configfs.c memcpy(desc->ext_compat_id, page, l); l 1128 drivers/usb/gadget/configfs.c int l; l 1130 drivers/usb/gadget/configfs.c l = min_t(int, 8, len); l 1131 drivers/usb/gadget/configfs.c if (page[l - 1] == '\n') l 1132 drivers/usb/gadget/configfs.c --l; l 1135 drivers/usb/gadget/configfs.c memcpy(desc->ext_compat_id + 8, page, l); l 44 drivers/usb/gadget/function/uvc_configfs.c static int uvcg_config_compare_u32(const void *l, const void *r) l 46 drivers/usb/gadget/function/uvc_configfs.c u32 li = *(const u32 *)l; l 1197 drivers/usb/gadget/udc/omap_udc.c u32 l; l 1199 drivers/usb/gadget/udc/omap_udc.c l = omap_readl(OTG_CTRL); l 1200 drivers/usb/gadget/udc/omap_udc.c l |= OTG_BSESSVLD; l 1201 drivers/usb/gadget/udc/omap_udc.c omap_writel(l, OTG_CTRL); l 1211 drivers/usb/gadget/udc/omap_udc.c u32 l; l 1213 drivers/usb/gadget/udc/omap_udc.c l = omap_readl(OTG_CTRL); l 1214 drivers/usb/gadget/udc/omap_udc.c l &= ~OTG_BSESSVLD; l 1215 drivers/usb/gadget/udc/omap_udc.c omap_writel(l, OTG_CTRL); l 1248 drivers/usb/gadget/udc/omap_udc.c u32 l; l 1256 drivers/usb/gadget/udc/omap_udc.c l = omap_readl(FUNC_MUX_CTRL_0); l 1258 drivers/usb/gadget/udc/omap_udc.c l |= VBUS_CTRL_1510; l 1260 drivers/usb/gadget/udc/omap_udc.c l &= ~VBUS_CTRL_1510; l 1261 drivers/usb/gadget/udc/omap_udc.c omap_writel(l, FUNC_MUX_CTRL_0); l 1380 drivers/usb/gadget/udc/omap_udc.c u32 l; l 1382 drivers/usb/gadget/udc/omap_udc.c l = omap_readl(OTG_CTRL); l 1383 drivers/usb/gadget/udc/omap_udc.c l |= OTG_B_HNPEN | OTG_B_BUSREQ; l 1384 drivers/usb/gadget/udc/omap_udc.c l &= ~OTG_PULLUP; l 1385 drivers/usb/gadget/udc/omap_udc.c omap_writel(l, OTG_CTRL); l 178 drivers/usb/host/ohci-omap.c u32 l; l 185 drivers/usb/host/ohci-omap.c l = omap_readl(OTG_CTRL); l 186 drivers/usb/host/ohci-omap.c l &= ~OTG_A_BUSREQ; l 187 drivers/usb/host/ohci-omap.c omap_writel(l, OTG_CTRL); l 2341 drivers/usb/host/u132-hcd.c int l = 0; l 2348 drivers/usb/host/u132-hcd.c l += w; l 1627 drivers/usb/misc/ftdi-elan.c int l = 0; l 1651 drivers/usb/misc/ftdi-elan.c l += w; l 106 drivers/usb/musb/omap2430.c u32 l; l 109 drivers/usb/musb/omap2430.c l = musb_readl(musb->mregs, OTG_FORCESTDBY); l 110 drivers/usb/musb/omap2430.c l |= ENABLEFORCE; 
/* enable MSTANDBY */ l 111 drivers/usb/musb/omap2430.c musb_writel(musb->mregs, OTG_FORCESTDBY, l); l 116 drivers/usb/musb/omap2430.c u32 l; l 118 drivers/usb/musb/omap2430.c l = musb_readl(musb->mregs, OTG_FORCESTDBY); l 119 drivers/usb/musb/omap2430.c l &= ~ENABLEFORCE; /* disable MSTANDBY */ l 120 drivers/usb/musb/omap2430.c musb_writel(musb->mregs, OTG_FORCESTDBY, l); l 226 drivers/usb/musb/omap2430.c u32 l; l 265 drivers/usb/musb/omap2430.c l = musb_readl(musb->mregs, OTG_INTERFSEL); l 269 drivers/usb/musb/omap2430.c l &= ~ULPI_12PIN; /* Disable ULPI */ l 270 drivers/usb/musb/omap2430.c l |= UTMI_8BIT; /* Enable UTMI */ l 272 drivers/usb/musb/omap2430.c l |= ULPI_12PIN; l 275 drivers/usb/musb/omap2430.c musb_writel(musb->mregs, OTG_INTERFSEL, l); l 327 drivers/usb/phy/phy-isp1301-omap.c u32 l; l 342 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS; l 343 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_CTRL); l 344 drivers/usb/phy/phy-isp1301-omap.c isp->last_otg_ctrl = l; l 351 drivers/usb/phy/phy-isp1301-omap.c u32 l; l 366 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS; l 367 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_CTRL); l 368 drivers/usb/phy/phy-isp1301-omap.c isp->last_otg_ctrl = l; l 613 drivers/usb/phy/phy-isp1301-omap.c u32 l; l 631 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_CTRL); l 632 drivers/usb/phy/phy-isp1301-omap.c l |= OTG_PULLUP; l 633 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_CTRL); l 812 drivers/usb/phy/phy-isp1301-omap.c u32 l; l 819 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_SYSCON_2); l 820 drivers/usb/phy/phy-isp1301-omap.c l |= OTG_EN l 830 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_SYSCON_2); l 843 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_SYSCON_2); l 844 drivers/usb/phy/phy-isp1301-omap.c l |= OTG_EN; l 845 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_SYSCON_2); l 911 drivers/usb/phy/phy-isp1301-omap.c u32 l; l 913 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS; l 914 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_CTRL); l 988 drivers/usb/phy/phy-isp1301-omap.c u32 l; l 1010 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK; l 1011 drivers/usb/phy/phy-isp1301-omap.c l &= ~OTG_CTRL_BITS; l 1012 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_CTRL); l 1038 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS; l 1039 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_CTRL); l 1349 drivers/usb/phy/phy-isp1301-omap.c u32 l; l 1351 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK; l 1352 drivers/usb/phy/phy-isp1301-omap.c l &= ~(OTG_XCEIV_OUTPUTS|OTG_CTRL_BITS); l 1353 drivers/usb/phy/phy-isp1301-omap.c l |= OTG_ID; l 1354 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_CTRL); l 1429 drivers/usb/phy/phy-isp1301-omap.c u32 l; l 1455 drivers/usb/phy/phy-isp1301-omap.c l = omap_readl(OTG_CTRL); l 1456 drivers/usb/phy/phy-isp1301-omap.c l |= OTG_A_SETB_HNPEN; l 1457 drivers/usb/phy/phy-isp1301-omap.c omap_writel(l, OTG_CTRL); l 42 drivers/usb/phy/phy-omap-otg.c u32 l; l 44 drivers/usb/phy/phy-omap-otg.c l = readl(otg_dev->base + OMAP_OTG_CTRL); l 45 drivers/usb/phy/phy-omap-otg.c l &= ~OMAP_OTG_XCEIV_OUTPUTS; l 46 drivers/usb/phy/phy-omap-otg.c l |= outputs; l 47 drivers/usb/phy/phy-omap-otg.c writel(l, otg_dev->base + OMAP_OTG_CTRL); l 1404 drivers/usb/serial/ftdi_sio.c int l = 
priv->latency; l 1410 drivers/usb/serial/ftdi_sio.c l = 1; l 1412 drivers/usb/serial/ftdi_sio.c dev_dbg(&port->dev, "%s: setting latency timer = %i\n", __func__, l); l 1418 drivers/usb/serial/ftdi_sio.c l, priv->interface, l 338 drivers/usb/serial/garmin_gps.c unsigned l = 0; l 361 drivers/usb/serial/garmin_gps.c l = ptr-pkt; l 363 drivers/usb/serial/garmin_gps.c send_to_tty(garmin_data_p->port, pkt, l); l 1874 drivers/vhost/vhost.c u64 start, end, l, min; l 1890 drivers/vhost/vhost.c l = end - start + 1; l 1893 drivers/vhost/vhost.c l); l 1897 drivers/vhost/vhost.c min = min(l, min); l 1952 drivers/vhost/vhost.c u64 l = min(log[i].len, len); l 1953 drivers/vhost/vhost.c r = log_write(vq->log_base, log[i].addr, l); l 1956 drivers/vhost/vhost.c len -= l; l 644 drivers/video/fbdev/amifb.c u_long l; l 2109 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(BPC0_COLOR | BPC0_SHRES | BPC0_ECSENA, bplcon0); l 2110 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(0x0181, diwstrt); l 2111 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(0x0281, diwstop); l 2112 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(0x0000, diwhigh); l 2114 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(BPC0_COLOR, bplcon0); l 2117 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(0, spr[i].pos); l 2118 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(highw(p), sprpt[i]); l 2119 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE2(loww(p), sprpt[i]); l 2122 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(IF_SETCLR | IF_COPER, intreq); l 2124 drivers/video/fbdev/amifb.c (cop++)->l = CEND; l 2125 drivers/video/fbdev/amifb.c (cop++)->l = CMOVE(0, copjmp2); l 2126 drivers/video/fbdev/amifb.c cop->l = CEND; l 2135 drivers/video/fbdev/amifb.c copdisplay.wait->l = CWAIT(32, par->diwstrt_v - 4); l 2166 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(highw(p), bplpt[i]); l 2167 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE2(loww(p), bplpt[i]); l 2171 drivers/video/fbdev/amifb.c (copl++)->l = CWAIT(h_end1, 510); l 2175 drivers/video/fbdev/amifb.c (copl++)->l = CWAIT(h_end1, line); l 2177 drivers/video/fbdev/amifb.c (copl++)->l = CWAIT(h_end2, line); l 2184 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(highw(p), bplpt[i]); l 2185 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE2(loww(p), bplpt[i]); l 2187 drivers/video/fbdev/amifb.c copl->l = CEND; l 2200 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(highw(p), bplpt[i]); l 2201 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE2(loww(p), bplpt[i]); l 2205 drivers/video/fbdev/amifb.c (cops++)->l = CWAIT(h_end1, 510); l 2209 drivers/video/fbdev/amifb.c (cops++)->l = CWAIT(h_end1, line); l 2211 drivers/video/fbdev/amifb.c (cops++)->l = CWAIT(h_end2, line); l 2223 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(highw(p), bplpt[i]); l 2224 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE2(loww(p), bplpt[i]); l 2226 drivers/video/fbdev/amifb.c cops->l = CEND; l 2245 drivers/video/fbdev/amifb.c (copl++)->l = CWAIT(0, 10); l 2246 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(par->bplcon0, bplcon0); l 2247 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(0, sprpt[0]); l 2248 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE2(0, sprpt[0]); l 2253 drivers/video/fbdev/amifb.c (cops++)->l = CWAIT(0, 10); l 2254 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(par->bplcon0, bplcon0); l 2255 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(0, sprpt[0]); l 2256 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE2(0, sprpt[0]); l 2258 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v + 
1), diwstrt); l 2259 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v + 1), diwstop); l 2260 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt); l 2261 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop); l 2263 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v + 1, l 2265 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v, l 2269 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal); l 2270 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt + 1), vbstrt); l 2271 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(vbstop2hw(par->vbstop + 1), vbstop); l 2272 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal); l 2273 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt); l 2274 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop); l 2279 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(highw(p), cop2lc); l 2280 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE2(loww(p), cop2lc); l 2282 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE(highw(p), cop2lc); l 2283 drivers/video/fbdev/amifb.c (cops++)->l = CMOVE2(loww(p), cop2lc); l 2286 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt); l 2287 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop); l 2289 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v, l 2293 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal); l 2294 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt); l 2295 drivers/video/fbdev/amifb.c (copl++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop); l 41 drivers/video/fbdev/atafb_iplan2p2.c int w, l , i, j; l 63 drivers/video/fbdev/atafb_iplan2p2.c l = next_line - w * 4; l 67 drivers/video/fbdev/atafb_iplan2p2.c s = (u32 *)((u8 *)s + l); l 68 drivers/video/fbdev/atafb_iplan2p2.c d = (u32 *)((u8 *)d + l); l 89 drivers/video/fbdev/atafb_iplan2p2.c l = next_line - w * 4; l 93 drivers/video/fbdev/atafb_iplan2p2.c s = (u32 *)((u8 *)s - l); l 94 drivers/video/fbdev/atafb_iplan2p2.c d = (u32 *)((u8 *)d - l); l 41 drivers/video/fbdev/atafb_iplan2p4.c int w, l , i, j; l 63 drivers/video/fbdev/atafb_iplan2p4.c l = next_line - w * 4; l 67 drivers/video/fbdev/atafb_iplan2p4.c s = (u32 *)((u8 *)s + l); l 68 drivers/video/fbdev/atafb_iplan2p4.c d = (u32 *)((u8 *)d + l); l 89 drivers/video/fbdev/atafb_iplan2p4.c l = next_line - w * 4; l 93 drivers/video/fbdev/atafb_iplan2p4.c s = (u32 *)((u8 *)s - l); l 94 drivers/video/fbdev/atafb_iplan2p4.c d = (u32 *)((u8 *)d - l); l 48 drivers/video/fbdev/atafb_iplan2p8.c int w, l , i, j; l 70 drivers/video/fbdev/atafb_iplan2p8.c l = next_line - w * 4; l 74 drivers/video/fbdev/atafb_iplan2p8.c s = (u32 *)((u8 *)s + l); l 75 drivers/video/fbdev/atafb_iplan2p8.c d = (u32 *)((u8 *)d + l); l 96 drivers/video/fbdev/atafb_iplan2p8.c l = next_line - w * 4; l 100 drivers/video/fbdev/atafb_iplan2p8.c s = (u32 *)((u8 *)s - l); l 101 drivers/video/fbdev/atafb_iplan2p8.c d = (u32 *)((u8 *)d - l); l 162 drivers/video/fbdev/aty/mach64_cursor.c u16 l = 0xaaaa; l 168 drivers/video/fbdev/aty/mach64_cursor.c l = cursor_bits_lookup[(b ^ m) >> 4] | l 174 drivers/video/fbdev/aty/mach64_cursor.c l = 
cursor_bits_lookup[(b & m) >> 4] | l 184 drivers/video/fbdev/aty/mach64_cursor.c l = comp(l, 0xaaaa, l 187 drivers/video/fbdev/aty/mach64_cursor.c fb_writeb(l & 0xff, dst++); l 188 drivers/video/fbdev/aty/mach64_cursor.c fb_writeb(l >> 8, dst++); l 739 drivers/video/fbdev/aty/radeon_monitor.c int len, l, rc = 0; l 743 drivers/video/fbdev/aty/radeon_monitor.c l = strlen(model); l 746 drivers/video/fbdev/aty/radeon_monitor.c rc = memcmp(model, cp, min(len, l)) == 0; l 781 drivers/video/fbdev/controlfb.c unsigned long p0, p1, p2, k, l, m, n, min; l 787 drivers/video/fbdev/controlfb.c l = clk << p2; l 790 drivers/video/fbdev/controlfb.c for (k = 1, min = l; k < 32; k++) { l 794 drivers/video/fbdev/controlfb.c n = m / l; l 795 drivers/video/fbdev/controlfb.c rem = m % l; l 149 drivers/video/fbdev/core/cfbimgblt.c u32 i, j, l; l 158 drivers/video/fbdev/core/cfbimgblt.c l = 8; l 172 drivers/video/fbdev/core/cfbimgblt.c l--; l 173 drivers/video/fbdev/core/cfbimgblt.c color = (*s & (1 << l)) ? fgcolor : bgcolor; l 184 drivers/video/fbdev/core/cfbimgblt.c if (!l) { l = 8; s++; } l 378 drivers/video/fbdev/core/fbcon.c int l = fbcon_softback_size / vc->vc_size_row; l 380 drivers/video/fbdev/core/fbcon.c if (l > 5) l 381 drivers/video/fbdev/core/fbcon.c softback_end = softback_buf + l * vc->vc_size_row; l 21 drivers/video/fbdev/core/modedb.c #define name_matches(v, s, l) \ l 22 drivers/video/fbdev/core/modedb.c ((v).name && !strncmp((s), (v).name, (l)) && strlen((v).name) == (l)) l 121 drivers/video/fbdev/core/sysimgblt.c u32 i, j, l; l 129 drivers/video/fbdev/core/sysimgblt.c l = 8; l 143 drivers/video/fbdev/core/sysimgblt.c l--; l 144 drivers/video/fbdev/core/sysimgblt.c color = (*s & (1 << l)) ? fgcolor : bgcolor; l 155 drivers/video/fbdev/core/sysimgblt.c if (!l) { l = 8; s++; } l 891 drivers/video/fbdev/fsl-diu-fb.c size_t l = info->fix.smem_len; l 900 drivers/video/fbdev/fsl-diu-fb.c free_pages_exact(p, l); l 87 drivers/video/fbdev/matrox/matroxfb_accel.c #define mga_ydstlen(y,l) mga_outl(M_YDSTLEN | M_EXEC, ((y) << 16) | (l)) l 102 drivers/video/fbdev/nvidia/nv_local.h #define reverse_order(l) \ l 104 drivers/video/fbdev/nvidia/nv_local.h u8 *a = (u8 *)(l); \ l 111 drivers/video/fbdev/nvidia/nv_local.h #define reverse_order(l) do { } while(0) l 81 drivers/video/fbdev/omap/lcdc.c u32 l; l 83 drivers/video/fbdev/omap/lcdc.c l = omap_readl(OMAP_LCDC_CONTROL); l 84 drivers/video/fbdev/omap/lcdc.c l &= ~(3 << 20); l 87 drivers/video/fbdev/omap/lcdc.c l |= 1 << 20; l 90 drivers/video/fbdev/omap/lcdc.c l |= 2 << 20; l 97 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_CONTROL); l 102 drivers/video/fbdev/omap/lcdc.c u32 l; l 104 drivers/video/fbdev/omap/lcdc.c l = omap_readl(OMAP_LCDC_CONTROL); l 105 drivers/video/fbdev/omap/lcdc.c l |= OMAP_LCDC_CTRL_LCD_EN; l 106 drivers/video/fbdev/omap/lcdc.c l &= ~OMAP_LCDC_IRQ_MASK; l 107 drivers/video/fbdev/omap/lcdc.c l |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE; /* enabled IRQs */ l 108 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_CONTROL); l 113 drivers/video/fbdev/omap/lcdc.c u32 l; l 116 drivers/video/fbdev/omap/lcdc.c l = omap_readl(OMAP_LCDC_CONTROL); l 123 drivers/video/fbdev/omap/lcdc.c l &= ~mask; l 124 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_CONTROL); l 238 drivers/video/fbdev/omap/lcdc.c u32 l; l 245 drivers/video/fbdev/omap/lcdc.c l = omap_readl(OMAP_LCDC_CONTROL); l 246 drivers/video/fbdev/omap/lcdc.c l &= ~OMAP_LCDC_IRQ_DONE; l 247 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_CONTROL); l 458 
drivers/video/fbdev/omap/lcdc.c u32 l; l 464 drivers/video/fbdev/omap/lcdc.c l = omap_readl(OMAP_LCDC_CONTROL); l 465 drivers/video/fbdev/omap/lcdc.c l &= ~OMAP_LCDC_CTRL_LCD_TFT; l 466 drivers/video/fbdev/omap/lcdc.c l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0; l 470 drivers/video/fbdev/omap/lcdc.c l |= (is_tft && panel->bpp == 8) ? 0x810000 : 0; l 473 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_CONTROL); l 475 drivers/video/fbdev/omap/lcdc.c l = omap_readl(OMAP_LCDC_TIMING2); l 476 drivers/video/fbdev/omap/lcdc.c l &= ~(((1 << 6) - 1) << 20); l 477 drivers/video/fbdev/omap/lcdc.c l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20; l 478 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_TIMING2); l 480 drivers/video/fbdev/omap/lcdc.c l = panel->x_res - 1; l 481 drivers/video/fbdev/omap/lcdc.c l |= (panel->hsw - 1) << 10; l 482 drivers/video/fbdev/omap/lcdc.c l |= (panel->hfp - 1) << 16; l 483 drivers/video/fbdev/omap/lcdc.c l |= (panel->hbp - 1) << 24; l 484 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_TIMING0); l 486 drivers/video/fbdev/omap/lcdc.c l = panel->y_res - 1; l 487 drivers/video/fbdev/omap/lcdc.c l |= (panel->vsw - 1) << 10; l 488 drivers/video/fbdev/omap/lcdc.c l |= panel->vfp << 16; l 489 drivers/video/fbdev/omap/lcdc.c l |= panel->vbp << 24; l 490 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_TIMING1); l 492 drivers/video/fbdev/omap/lcdc.c l = omap_readl(OMAP_LCDC_TIMING2); l 493 drivers/video/fbdev/omap/lcdc.c l &= ~0xff; l 508 drivers/video/fbdev/omap/lcdc.c l |= pcd & 0xff; l 509 drivers/video/fbdev/omap/lcdc.c l |= panel->acb << 8; l 510 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_TIMING2); l 673 drivers/video/fbdev/omap/lcdc.c u32 l; l 682 drivers/video/fbdev/omap/lcdc.c l = 0; l 683 drivers/video/fbdev/omap/lcdc.c omap_writel(l, OMAP_LCDC_CONTROL); l 210 drivers/video/fbdev/omap/sossi.c u32 l; l 219 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_INIT1_REG); l 220 drivers/video/fbdev/omap/sossi.c l &= ~((0x0f << 20) | (0x3f << 24)); l 221 drivers/video/fbdev/omap/sossi.c l |= (tw0 << 20) | (tw1 << 24); l 222 drivers/video/fbdev/omap/sossi.c sossi_write_reg(SOSSI_INIT1_REG, l); l 228 drivers/video/fbdev/omap/sossi.c u32 l; l 230 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_INIT3_REG); l 231 drivers/video/fbdev/omap/sossi.c l &= ~0x3ff; l 232 drivers/video/fbdev/omap/sossi.c l |= ((bus_pick_count - 1) << 5) | ((bus_pick_width - 1) & 0x1f); l 233 drivers/video/fbdev/omap/sossi.c sossi_write_reg(SOSSI_INIT3_REG, l); l 238 drivers/video/fbdev/omap/sossi.c u32 l; l 240 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_TEARING_REG); l 241 drivers/video/fbdev/omap/sossi.c l &= ~(((1 << 11) - 1) << 15); l 242 drivers/video/fbdev/omap/sossi.c l |= line << 15; l 243 drivers/video/fbdev/omap/sossi.c l &= ~(0x3 << 26); l 244 drivers/video/fbdev/omap/sossi.c l |= mode << 26; l 245 drivers/video/fbdev/omap/sossi.c sossi_write_reg(SOSSI_TEARING_REG, l); l 386 drivers/video/fbdev/omap/sossi.c u32 l; l 407 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_TEARING_REG); l 408 drivers/video/fbdev/omap/sossi.c l &= ~((1 << 15) - 1); l 409 drivers/video/fbdev/omap/sossi.c l |= vs << 3; l 410 drivers/video/fbdev/omap/sossi.c l |= hs; l 412 drivers/video/fbdev/omap/sossi.c l |= 1 << 29; l 414 drivers/video/fbdev/omap/sossi.c l &= ~(1 << 29); l 416 drivers/video/fbdev/omap/sossi.c l |= 1 << 28; l 418 drivers/video/fbdev/omap/sossi.c l &= ~(1 << 28); l 419 drivers/video/fbdev/omap/sossi.c 
sossi_write_reg(SOSSI_TEARING_REG, l); l 560 drivers/video/fbdev/omap/sossi.c u32 l, k; l 595 drivers/video/fbdev/omap/sossi.c l = omap_readl(MOD_CONF_CTRL_1); l 596 drivers/video/fbdev/omap/sossi.c l |= CONF_SOSSI_RESET_R; l 597 drivers/video/fbdev/omap/sossi.c omap_writel(l, MOD_CONF_CTRL_1); l 598 drivers/video/fbdev/omap/sossi.c l &= ~CONF_SOSSI_RESET_R; l 599 drivers/video/fbdev/omap/sossi.c omap_writel(l, MOD_CONF_CTRL_1); l 602 drivers/video/fbdev/omap/sossi.c l = omap_readl(ARM_IDLECT2); l 603 drivers/video/fbdev/omap/sossi.c l &= ~(1 << 8); /* DMACK_REQ */ l 604 drivers/video/fbdev/omap/sossi.c omap_writel(l, ARM_IDLECT2); l 606 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_INIT2_REG); l 608 drivers/video/fbdev/omap/sossi.c l |= (1 << 0) | (1 << 1); l 609 drivers/video/fbdev/omap/sossi.c sossi_write_reg(SOSSI_INIT2_REG, l); l 611 drivers/video/fbdev/omap/sossi.c l &= ~(1 << 1); l 612 drivers/video/fbdev/omap/sossi.c sossi_write_reg(SOSSI_INIT2_REG, l); l 615 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_ID_REG); l 618 drivers/video/fbdev/omap/sossi.c if (l != 0x55555555 || k != 0xaaaaaaaa) { l 620 drivers/video/fbdev/omap/sossi.c "invalid SoSSI sync pattern: %08x, %08x\n", l, k); l 631 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_ID_REG); /* Component code */ l 632 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_ID_REG); l 634 drivers/video/fbdev/omap/sossi.c l >> 16, l & 0xffff); l 636 drivers/video/fbdev/omap/sossi.c l = sossi_read_reg(SOSSI_INIT1_REG); l 637 drivers/video/fbdev/omap/sossi.c l |= (1 << 19); /* DMA_MODE */ l 638 drivers/video/fbdev/omap/sossi.c l &= ~(1 << 31); /* REORDERING */ l 639 drivers/video/fbdev/omap/sossi.c sossi_write_reg(SOSSI_INIT1_REG, l); l 177 drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c int r, l, bytes_read; l 182 drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c l = min(EDID_LENGTH, len); l 183 drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0); l 187 drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c bytes_read = l; l 191 drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c l = min(EDID_LENGTH, len - EDID_LENGTH); l 194 drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c l, EDID_LENGTH); l 198 drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c bytes_read += l; l 1584 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 l; l 1589 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); l 1592 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l &= ~((0x3 << 5) | (0x1 << 21)); l 1593 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l |= (orig_width != out_width) ? (1 << 5) : 0; l 1594 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l |= (orig_height != out_height) ? (1 << 6) : 0; l 1595 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l |= five_taps ? (1 << 21) : 0; l 1599 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l &= ~(0x3 << 7); l 1600 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l |= (orig_width <= out_width) ? 0 : (1 << 7); l 1601 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l |= (orig_height <= out_height) ? 0 : (1 << 8); l 1606 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l &= ~(0x1 << 22); l 1607 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l |= five_taps ? 
(1 << 22) : 0; l 1610 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l); l 2812 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 l; l 2851 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); l 2852 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */ l 2853 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */ l 2855 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */ l 2857 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */ l 2858 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l); l 3030 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 l; l 3051 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = dispc_read_reg(DISPC_CONTROL); l 3052 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_MOD(l, gpout0, 15, 15); l 3053 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_MOD(l, gpout1, 16, 16); l 3054 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_CONTROL, l); l 3140 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 timing_h, timing_v, l; l 3211 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_VAL(onoff, 17, 17) | l 3220 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l |= (1 << 18); l 3222 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_POL_FREQ(channel), l); l 3304 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 l; l 3305 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = dispc_read_reg(DISPC_DIVISORo(channel)); l 3306 drivers/video/fbdev/omap2/omapfb/dss/dispc.c *lck_div = FLD_GET(l, 23, 16); l 3307 drivers/video/fbdev/omap2/omapfb/dss/dispc.c *pck_div = FLD_GET(l, 7, 0); l 3346 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 l; l 3349 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = dispc_read_reg(DISPC_DIVISORo(channel)); l 3351 drivers/video/fbdev/omap2/omapfb/dss/dispc.c lcd = FLD_GET(l, 23, 16); l 3388 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 l; l 3390 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = dispc_read_reg(DISPC_DIVISORo(channel)); l 3392 drivers/video/fbdev/omap2/omapfb/dss/dispc.c pcd = FLD_GET(l, 7, 0); l 3460 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 l; l 3476 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = dispc_read_reg(DISPC_DIVISOR); l 3477 drivers/video/fbdev/omap2/omapfb/dss/dispc.c lcd = FLD_GET(l, 23, 16); l 3829 drivers/video/fbdev/omap2/omapfb/dss/dispc.c u32 l; l 3833 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = dispc_read_reg(DISPC_DIVISOR); l 3835 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_MOD(l, 1, 0, 0); l 3836 drivers/video/fbdev/omap2/omapfb/dss/dispc.c l = FLD_MOD(l, 1, 23, 16); l 3837 drivers/video/fbdev/omap2/omapfb/dss/dispc.c dispc_write_reg(DISPC_DIVISOR, l); l 202 drivers/video/fbdev/omap2/omapfb/dss/display.c struct list_head *l; l 221 drivers/video/fbdev/omap2/omapfb/dss/display.c list_for_each(l, &panel_list) { l 222 drivers/video/fbdev/omap2/omapfb/dss/display.c dssdev = list_entry(l, struct omap_dss_device, panel_list); l 224 drivers/video/fbdev/omap2/omapfb/dss/display.c if (list_is_last(l, &panel_list)) { l 229 drivers/video/fbdev/omap2/omapfb/dss/display.c dssdev = list_entry(l->next, struct omap_dss_device, l 1178 drivers/video/fbdev/omap2/omapfb/dss/dsi.c u32 l; l 1184 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); l 1946 
drivers/video/fbdev/omap2/omapfb/dss/dsi.c u32 l; l 1949 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = 0; l 1955 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l |= 1 << (i * 2 + (p ? 0 : 1)); l 1958 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l |= 1 << (i * 2 + (p ? 1 : 0)); l 1973 drivers/video/fbdev/omap2/omapfb/dss/dsi.c REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17); l 2009 drivers/video/fbdev/omap2/omapfb/dss/dsi.c u32 l; l 2012 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); l 2016 drivers/video/fbdev/omap2/omapfb/dss/dsi.c if (!in_use[i] || (l & (1 << offsets[i]))) l 2025 drivers/video/fbdev/omap2/omapfb/dss/dsi.c if (!in_use[i] || (l & (1 << offsets[i]))) l 2057 drivers/video/fbdev/omap2/omapfb/dss/dsi.c u32 l; l 2083 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = dsi_read_reg(dsidev, DSI_TIMING1); l 2084 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ l 2085 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */ l 2086 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */ l 2087 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ l 2088 drivers/video/fbdev/omap2/omapfb/dss/dsi.c dsi_write_reg(dsidev, DSI_TIMING1, l); l 3907 drivers/video/fbdev/omap2/omapfb/dss/dsi.c u32 l; l 3936 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ l 3937 drivers/video/fbdev/omap2/omapfb/dss/dsi.c dsi_write_reg(dsidev, DSI_VC_TE(channel), l); l 3943 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ l 3945 drivers/video/fbdev/omap2/omapfb/dss/dsi.c l = FLD_MOD(l, 1, 31, 31); /* TE_START */ l 3946 drivers/video/fbdev/omap2/omapfb/dss/dsi.c dsi_write_reg(dsidev, DSI_VC_TE(channel), l); l 261 drivers/video/fbdev/omap2/omapfb/dss/dss.c u32 l; l 265 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = dss_read_reg(DSS_SDI_CONTROL); l 266 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */ l 267 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */ l 268 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */ l 269 drivers/video/fbdev/omap2/omapfb/dss/dss.c dss_write_reg(DSS_SDI_CONTROL, l); l 271 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = dss_read_reg(DSS_PLL_CONTROL); l 272 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */ l 273 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */ l 274 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */ l 275 drivers/video/fbdev/omap2/omapfb/dss/dss.c dss_write_reg(DSS_PLL_CONTROL, l); l 606 drivers/video/fbdev/omap2/omapfb/dss/dss.c int l = 0; l 609 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = 0; l 611 drivers/video/fbdev/omap2/omapfb/dss/dss.c l = 1; l 616 drivers/video/fbdev/omap2/omapfb/dss/dss.c REG_FLD_MOD(DSS_CONTROL, l, 6, 6); l 162 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c int r, l; l 175 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c l = 128; l 181 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c l += 128; l 184 drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c return l; l 134 drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c u32 l = 0; l 139 drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c l |= FLD_VAL(video_fmt->y_res, 31, 16); l 140 
drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c l |= FLD_VAL(video_fmt->x_res, 15, 0); l 141 drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c hdmi_write_reg(wp->base, HDMI_WP_VIDEO_SIZE, l); l 35 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c int l = 0, total = 0; l 38 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c for (i = 0; total < prop->length; total += l, p += l, i++) l 39 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c l = strlen(p) + 1; l 66 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c size_t l = strlen(src) + 1; l 72 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c dst += l; l 74 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c src += l; l 75 drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c total += l; l 244 drivers/video/fbdev/omap2/omapfb/dss/pll.c u32 l; l 246 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = 0; l 248 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 1, 0, 0); /* PLL_STOPMODE */ l 249 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->n - 1, hw->n_msb, hw->n_lsb); /* PLL_REGN */ l 250 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->m, hw->m_msb, hw->m_lsb); /* PLL_REGM */ l 252 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[0] ? cinfo->mX[0] - 1 : 0, l 255 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[1] ? cinfo->mX[1] - 1 : 0, l 257 drivers/video/fbdev/omap2/omapfb/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION1); l 259 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = 0; l 261 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[2] ? cinfo->mX[2] - 1 : 0, l 264 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[3] ? cinfo->mX[3] - 1 : 0, l 266 drivers/video/fbdev/omap2/omapfb/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION3); l 268 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION2); l 276 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, f, 4, 1); /* PLL_FREQSEL */ l 280 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */ l 282 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 1, 13, 13); /* PLL_REFEN */ l 283 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0, 14, 14); /* PHY_CLKINEN */ l 284 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0, 16, 16); /* M4_CLOCK_EN */ l 285 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0, 18, 18); /* M5_CLOCK_EN */ l 286 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 1, 20, 20); /* HSDIVBYPASS */ l 288 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 3, 22, 21); /* REFSEL = sysclk */ l 289 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0, 23, 23); /* M6_CLOCK_EN */ l 290 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0, 25, 25); /* M7_CLOCK_EN */ l 291 drivers/video/fbdev/omap2/omapfb/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION2); l 307 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION2); l 308 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 1, 14, 14); /* PHY_CLKINEN */ l 309 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[0] ? 1 : 0, 16, 16); /* M4_CLOCK_EN */ l 310 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[1] ? 
1 : 0, 18, 18); /* M5_CLOCK_EN */ l 311 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0, 20, 20); /* HSDIVBYPASS */ l 312 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[2] ? 1 : 0, 23, 23); /* M6_CLOCK_EN */ l 313 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[3] ? 1 : 0, 25, 25); /* M7_CLOCK_EN */ l 314 drivers/video/fbdev/omap2/omapfb/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION2); l 335 drivers/video/fbdev/omap2/omapfb/dss/pll.c u32 l; l 337 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = 0; l 338 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->m, 20, 9); /* PLL_REGM */ l 339 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->n - 1, 8, 1); /* PLL_REGN */ l 340 drivers/video/fbdev/omap2/omapfb/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION1); l 342 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION2); l 343 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0x0, 12, 12); /* PLL_HIGHFREQ divide by 2 */ l 344 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0x1, 13, 13); /* PLL_REFEN */ l 345 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0x0, 14, 14); /* PHY_CLKINEN */ l 347 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0x3, 22, 21); /* REFSEL = SYSCLK */ l 351 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0x4, 3, 1); l 353 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, 0x2, 3, 1); l 354 drivers/video/fbdev/omap2/omapfb/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION2); l 356 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION3); l 357 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->sd, 17, 10); /* PLL_REGSD */ l 358 drivers/video/fbdev/omap2/omapfb/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION3); l 360 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = readl_relaxed(base + PLL_CONFIGURATION4); l 361 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mX[0], 24, 18); /* PLL_REGM2 */ l 362 drivers/video/fbdev/omap2/omapfb/dss/pll.c l = FLD_MOD(l, cinfo->mf, 17, 0); /* PLL_REGM_F */ l 363 drivers/video/fbdev/omap2/omapfb/dss/pll.c writel_relaxed(l, base + PLL_CONFIGURATION4); l 308 drivers/video/fbdev/omap2/omapfb/dss/venc.c u32 l = __raw_readl(venc.base + idx); l 309 drivers/video/fbdev/omap2/omapfb/dss/venc.c return l; l 424 drivers/video/fbdev/omap2/omapfb/dss/venc.c u32 l; l 437 drivers/video/fbdev/omap2/omapfb/dss/venc.c l = 0; l 440 drivers/video/fbdev/omap2/omapfb/dss/venc.c l |= 1 << 1; l 442 drivers/video/fbdev/omap2/omapfb/dss/venc.c l |= (1 << 0) | (1 << 2); l 445 drivers/video/fbdev/omap2/omapfb/dss/venc.c l |= 1 << 3; l 447 drivers/video/fbdev/omap2/omapfb/dss/venc.c venc_write_reg(VENC_OUTPUT_CONTROL, l); l 136 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c ssize_t l = 0; l 150 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", l 154 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c l += snprintf(buf + l, PAGE_SIZE - l, "\n"); l 159 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c return l; l 325 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c ssize_t l = 0; l 331 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c l += snprintf(buf + l, PAGE_SIZE - l, "%s%d", l 335 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c l += snprintf(buf + l, PAGE_SIZE - l, "\n"); l 339 drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c return l; l 660 drivers/video/fbdev/ps3fb.c unsigned int l = 
min(lines, maxlines); l 661 drivers/video/fbdev/ps3fb.c ps3fb_sync_image(info->device, 0, dst, 0, vmode->xres, l, l 663 drivers/video/fbdev/ps3fb.c lines -= l; l 454 drivers/video/fbdev/riva/fbdev.c static inline void reverse_order(u32 *l) l 456 drivers/video/fbdev/riva/fbdev.c u8 *a = (u8 *)l; l 115 drivers/video/fbdev/savage/savagefb.h #define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF) l 116 drivers/video/fbdev/savage/savagefb.h #define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF) l 4634 drivers/video/fbdev/sis/sis_main.c int i, j, k, l, status; l 4861 drivers/video/fbdev/sis/sis_main.c l = channelab; l 4862 drivers/video/fbdev/sis/sis_main.c if(l == 3) l = 4; l 4863 drivers/video/fbdev/sis/sis_main.c if((ranksize * l) <= 256) { l 154 drivers/virtio/virtio_mmio.c __le32 l; l 175 drivers/virtio/virtio_mmio.c l = cpu_to_le32(readl(base + offset)); l 176 drivers/virtio/virtio_mmio.c memcpy(buf, &l, sizeof l); l 179 drivers/virtio/virtio_mmio.c l = cpu_to_le32(readl(base + offset)); l 180 drivers/virtio/virtio_mmio.c memcpy(buf, &l, sizeof l); l 181 drivers/virtio/virtio_mmio.c l = cpu_to_le32(ioread32(base + offset + sizeof l)); l 182 drivers/virtio/virtio_mmio.c memcpy(buf + sizeof l, &l, sizeof l); l 196 drivers/virtio/virtio_mmio.c __le32 l; l 218 drivers/virtio/virtio_mmio.c memcpy(&l, buf, sizeof l); l 219 drivers/virtio/virtio_mmio.c writel(le32_to_cpu(l), base + offset); l 222 drivers/virtio/virtio_mmio.c memcpy(&l, buf, sizeof l); l 223 drivers/virtio/virtio_mmio.c writel(le32_to_cpu(l), base + offset); l 224 drivers/virtio/virtio_mmio.c memcpy(&l, buf + sizeof l, sizeof l); l 225 drivers/virtio/virtio_mmio.c writel(le32_to_cpu(l), base + offset + sizeof l); l 196 drivers/virtio/virtio_pci_modern.c __le32 l; l 210 drivers/virtio/virtio_pci_modern.c l = cpu_to_le32(ioread32(vp_dev->device + offset)); l 211 drivers/virtio/virtio_pci_modern.c memcpy(buf, &l, sizeof l); l 214 drivers/virtio/virtio_pci_modern.c l = cpu_to_le32(ioread32(vp_dev->device + offset)); l 215 drivers/virtio/virtio_pci_modern.c memcpy(buf, &l, sizeof l); l 216 drivers/virtio/virtio_pci_modern.c l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l)); l 217 drivers/virtio/virtio_pci_modern.c memcpy(buf + sizeof l, &l, sizeof l); l 232 drivers/virtio/virtio_pci_modern.c __le32 l; l 246 drivers/virtio/virtio_pci_modern.c memcpy(&l, buf, sizeof l); l 247 drivers/virtio/virtio_pci_modern.c iowrite32(le32_to_cpu(l), vp_dev->device + offset); l 250 drivers/virtio/virtio_pci_modern.c memcpy(&l, buf, sizeof l); l 251 drivers/virtio/virtio_pci_modern.c iowrite32(le32_to_cpu(l), vp_dev->device + offset); l 252 drivers/virtio/virtio_pci_modern.c memcpy(&l, buf + sizeof l, sizeof l); l 253 drivers/virtio/virtio_pci_modern.c iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l); l 33 drivers/xen/time.c u32 h, l, h2; l 43 drivers/xen/time.c l = READ_ONCE(p32[0]); l 47 drivers/xen/time.c ret = (((u64)h) << 32) | l; l 59 drivers/xen/xen-pciback/vpci.c static inline int match_slot(struct pci_dev *l, struct pci_dev *r) l 61 drivers/xen/xen-pciback/vpci.c if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus) l 62 drivers/xen/xen-pciback/vpci.c && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn)) l 588 drivers/xen/xen-pciback/xenbus.c int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i); l 589 drivers/xen/xen-pciback/xenbus.c if (unlikely(l >= (sizeof(dev_str) - 1))) { l 617 drivers/xen/xen-pciback/xenbus.c l = snprintf(state_str, sizeof(state_str), "state-%d", i); l 618 
drivers/xen/xen-pciback/xenbus.c if (unlikely(l >= (sizeof(state_str) - 1))) { l 84 drivers/xen/xen-scsiback.c struct list_head l; l 635 drivers/xen/xen-scsiback.c list_for_each_entry(entry, head, l) { l 880 drivers/xen/xen-scsiback.c list_for_each_entry(entry, head, l) l 967 drivers/xen/xen-scsiback.c list_add_tail(&new->l, &info->v2p_entry_lists); l 985 drivers/xen/xen-scsiback.c list_del(&entry->l); l 1216 drivers/xen/xen-scsiback.c list_for_each_entry_safe(entry, tmp, head, l) l 106 fs/9p/fid.c int i, n, l, clone, access; l 166 fs/9p/fid.c l = min(n - i, P9_MAXWELEM); l 171 fs/9p/fid.c fid = p9_client_walk(fid, l, &wnames[i], clone); l 185 fs/9p/fid.c i += l; l 93 fs/binfmt_misc.c struct list_head *l; l 96 fs/binfmt_misc.c list_for_each(l, &entries) { l 97 fs/binfmt_misc.c Node *e = list_entry(l, Node, list); l 271 fs/btrfs/check-integrity.c struct btrfsic_block_link *l, l 273 fs/btrfs/check-integrity.c static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l); l 343 fs/btrfs/check-integrity.c const struct btrfsic_block_link *l); l 345 fs/btrfs/check-integrity.c const struct btrfsic_block_link *l); l 429 fs/btrfs/check-integrity.c static void btrfsic_block_link_init(struct btrfsic_block_link *l) l 431 fs/btrfs/check-integrity.c l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER; l 432 fs/btrfs/check-integrity.c l->ref_cnt = 1; l 433 fs/btrfs/check-integrity.c INIT_LIST_HEAD(&l->node_ref_to); l 434 fs/btrfs/check-integrity.c INIT_LIST_HEAD(&l->node_ref_from); l 435 fs/btrfs/check-integrity.c INIT_LIST_HEAD(&l->collision_resolving_node); l 436 fs/btrfs/check-integrity.c l->block_ref_to = NULL; l 437 fs/btrfs/check-integrity.c l->block_ref_from = NULL; l 442 fs/btrfs/check-integrity.c struct btrfsic_block_link *l; l 444 fs/btrfs/check-integrity.c l = kzalloc(sizeof(*l), GFP_NOFS); l 445 fs/btrfs/check-integrity.c if (NULL != l) l 446 fs/btrfs/check-integrity.c btrfsic_block_link_init(l); l 448 fs/btrfs/check-integrity.c return l; l 451 fs/btrfs/check-integrity.c static void btrfsic_block_link_free(struct btrfsic_block_link *l) l 453 fs/btrfs/check-integrity.c BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num)); l 454 fs/btrfs/check-integrity.c kfree(l); l 541 fs/btrfs/check-integrity.c struct btrfsic_block_link *l, l 545 fs/btrfs/check-integrity.c (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^ l 546 fs/btrfs/check-integrity.c ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^ l 547 fs/btrfs/check-integrity.c ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^ l 548 fs/btrfs/check-integrity.c ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev))) l 551 fs/btrfs/check-integrity.c BUG_ON(NULL == l->block_ref_to); l 552 fs/btrfs/check-integrity.c BUG_ON(NULL == l->block_ref_from); l 553 fs/btrfs/check-integrity.c list_add(&l->collision_resolving_node, h->table + hashval); l 556 fs/btrfs/check-integrity.c static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l) l 558 fs/btrfs/check-integrity.c list_del(&l->collision_resolving_node); l 574 fs/btrfs/check-integrity.c struct btrfsic_block_link *l; l 576 fs/btrfs/check-integrity.c list_for_each_entry(l, h->table + hashval, collision_resolving_node) { l 577 fs/btrfs/check-integrity.c BUG_ON(NULL == l->block_ref_to); l 578 fs/btrfs/check-integrity.c BUG_ON(NULL == l->block_ref_from); l 579 fs/btrfs/check-integrity.c if (l->block_ref_to->dev_state->bdev == bdev_ref_to && l 580 fs/btrfs/check-integrity.c l->block_ref_to->dev_bytenr == dev_bytenr_ref_to && l 581 
fs/btrfs/check-integrity.c l->block_ref_from->dev_state->bdev == bdev_ref_from && l 582 fs/btrfs/check-integrity.c l->block_ref_from->dev_bytenr == dev_bytenr_ref_from) l 583 fs/btrfs/check-integrity.c return l; l 711 fs/btrfs/check-integrity.c struct btrfsic_block_link *l; l 730 fs/btrfs/check-integrity.c l = btrfsic_block_link_hashtable_lookup( l 737 fs/btrfs/check-integrity.c BUG_ON(NULL == l); l 881 fs/btrfs/check-integrity.c struct btrfsic_block_link *l; l 905 fs/btrfs/check-integrity.c l = btrfsic_block_link_lookup_or_add( l 910 fs/btrfs/check-integrity.c if (NULL == l) { l 1242 fs/btrfs/check-integrity.c struct btrfsic_block_link *l; l 1285 fs/btrfs/check-integrity.c l = NULL; l 1308 fs/btrfs/check-integrity.c l = btrfsic_block_link_hashtable_lookup( l 1317 fs/btrfs/check-integrity.c if (NULL == l) { l 1318 fs/btrfs/check-integrity.c l = btrfsic_block_link_alloc(); l 1319 fs/btrfs/check-integrity.c if (NULL == l) { l 1327 fs/btrfs/check-integrity.c l->block_ref_to = next_block; l 1328 fs/btrfs/check-integrity.c l->block_ref_from = block; l 1329 fs/btrfs/check-integrity.c l->ref_cnt = 1; l 1330 fs/btrfs/check-integrity.c l->parent_generation = parent_generation; l 1333 fs/btrfs/check-integrity.c btrfsic_print_add_link(state, l); l 1335 fs/btrfs/check-integrity.c list_add(&l->node_ref_to, &block->ref_to_list); l 1336 fs/btrfs/check-integrity.c list_add(&l->node_ref_from, &next_block->ref_from_list); l 1338 fs/btrfs/check-integrity.c btrfsic_block_link_hashtable_add(l, l 1343 fs/btrfs/check-integrity.c l->ref_cnt++; l 1344 fs/btrfs/check-integrity.c l->parent_generation = parent_generation; l 1346 fs/btrfs/check-integrity.c btrfsic_print_add_link(state, l); l 1381 fs/btrfs/check-integrity.c struct btrfsic_block_link *l; l 1498 fs/btrfs/check-integrity.c l = btrfsic_block_link_lookup_or_add(state, l 1503 fs/btrfs/check-integrity.c if (NULL == l) l 1667 fs/btrfs/check-integrity.c const struct btrfsic_block_link *l; l 1674 fs/btrfs/check-integrity.c list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) { l 1679 fs/btrfs/check-integrity.c l->ref_cnt, l 1680 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 1681 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 1682 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l 1683 fs/btrfs/check-integrity.c l->block_ref_to->dev_bytenr, l 1684 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num); l 1687 fs/btrfs/check-integrity.c list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) { l 1692 fs/btrfs/check-integrity.c l->ref_cnt, l 1693 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_from), l 1694 fs/btrfs/check-integrity.c l->block_ref_from->logical_bytenr, l 1695 fs/btrfs/check-integrity.c l->block_ref_from->dev_state->name, l 1696 fs/btrfs/check-integrity.c l->block_ref_from->dev_bytenr, l 1697 fs/btrfs/check-integrity.c l->block_ref_from->mirror_num); l 1773 fs/btrfs/check-integrity.c struct btrfsic_block_link *l, *tmp; l 1878 fs/btrfs/check-integrity.c list_for_each_entry_safe(l, tmp, &block->ref_to_list, l 1881 fs/btrfs/check-integrity.c btrfsic_print_rem_link(state, l); l 1882 fs/btrfs/check-integrity.c l->ref_cnt--; l 1883 fs/btrfs/check-integrity.c if (0 == l->ref_cnt) { l 1884 fs/btrfs/check-integrity.c list_del(&l->node_ref_to); l 1885 fs/btrfs/check-integrity.c list_del(&l->node_ref_from); l 1886 fs/btrfs/check-integrity.c btrfsic_block_link_hashtable_remove(l); l 1887 fs/btrfs/check-integrity.c btrfsic_block_link_free(l); l 2219 
fs/btrfs/check-integrity.c struct btrfsic_block_link *l; l 2298 fs/btrfs/check-integrity.c l = btrfsic_block_link_lookup_or_add( l 2305 fs/btrfs/check-integrity.c if (NULL == l) l 2320 fs/btrfs/check-integrity.c const struct btrfsic_block_link *l; l 2347 fs/btrfs/check-integrity.c list_for_each_entry(l, &block->ref_to_list, node_ref_to) { l 2354 fs/btrfs/check-integrity.c l->ref_cnt, l 2355 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 2356 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 2357 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l 2358 fs/btrfs/check-integrity.c l->block_ref_to->dev_bytenr, l 2359 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num); l 2360 fs/btrfs/check-integrity.c if (l->block_ref_to->never_written) { l 2362 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 2363 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 2364 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l 2365 fs/btrfs/check-integrity.c l->block_ref_to->dev_bytenr, l 2366 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num); l 2368 fs/btrfs/check-integrity.c } else if (!l->block_ref_to->is_iodone) { l 2370 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 2371 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 2372 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l 2373 fs/btrfs/check-integrity.c l->block_ref_to->dev_bytenr, l 2374 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num); l 2376 fs/btrfs/check-integrity.c } else if (l->block_ref_to->iodone_w_error) { l 2378 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 2379 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 2380 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l 2381 fs/btrfs/check-integrity.c l->block_ref_to->dev_bytenr, l 2382 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num); l 2384 fs/btrfs/check-integrity.c } else if (l->parent_generation != l 2385 fs/btrfs/check-integrity.c l->block_ref_to->generation && l 2387 fs/btrfs/check-integrity.c l->parent_generation && l 2389 fs/btrfs/check-integrity.c l->block_ref_to->generation) { l 2391 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 2392 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 2393 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l 2394 fs/btrfs/check-integrity.c l->block_ref_to->dev_bytenr, l 2395 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num, l 2396 fs/btrfs/check-integrity.c l->block_ref_to->generation, l 2397 fs/btrfs/check-integrity.c l->parent_generation); l 2399 fs/btrfs/check-integrity.c } else if (l->block_ref_to->flush_gen > l 2400 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->last_flush_gen) { l 2402 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 2403 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 2404 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l 2405 fs/btrfs/check-integrity.c l->block_ref_to->dev_bytenr, l 2406 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num, block->flush_gen, l 2407 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->last_flush_gen); l 2410 fs/btrfs/check-integrity.c l->block_ref_to, l 2425 fs/btrfs/check-integrity.c const struct btrfsic_block_link *l; l 2439 fs/btrfs/check-integrity.c list_for_each_entry(l, &block->ref_from_list, node_ref_from) { l 2446 
fs/btrfs/check-integrity.c l->ref_cnt, l 2447 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_from), l 2448 fs/btrfs/check-integrity.c l->block_ref_from->logical_bytenr, l 2449 fs/btrfs/check-integrity.c l->block_ref_from->dev_state->name, l 2450 fs/btrfs/check-integrity.c l->block_ref_from->dev_bytenr, l 2451 fs/btrfs/check-integrity.c l->block_ref_from->mirror_num); l 2452 fs/btrfs/check-integrity.c if (l->block_ref_from->is_superblock && l 2454 fs/btrfs/check-integrity.c l->block_ref_from->dev_bytenr && l 2456 fs/btrfs/check-integrity.c l->block_ref_from->dev_state->bdev) l 2459 fs/btrfs/check-integrity.c l->block_ref_from, l 2469 fs/btrfs/check-integrity.c const struct btrfsic_block_link *l) l 2472 fs/btrfs/check-integrity.c l->ref_cnt, l 2473 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_from), l 2474 fs/btrfs/check-integrity.c l->block_ref_from->logical_bytenr, l 2475 fs/btrfs/check-integrity.c l->block_ref_from->dev_state->name, l 2476 fs/btrfs/check-integrity.c l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, l 2477 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 2478 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 2479 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr, l 2480 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num); l 2484 fs/btrfs/check-integrity.c const struct btrfsic_block_link *l) l 2487 fs/btrfs/check-integrity.c l->ref_cnt, l 2488 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_from), l 2489 fs/btrfs/check-integrity.c l->block_ref_from->logical_bytenr, l 2490 fs/btrfs/check-integrity.c l->block_ref_from->dev_state->name, l 2491 fs/btrfs/check-integrity.c l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, l 2492 fs/btrfs/check-integrity.c btrfsic_get_block_type(state, l->block_ref_to), l 2493 fs/btrfs/check-integrity.c l->block_ref_to->logical_bytenr, l 2494 fs/btrfs/check-integrity.c l->block_ref_to->dev_state->name, l->block_ref_to->dev_bytenr, l 2495 fs/btrfs/check-integrity.c l->block_ref_to->mirror_num); l 2522 fs/btrfs/check-integrity.c const struct btrfsic_block_link *l; l 2557 fs/btrfs/check-integrity.c list_for_each_entry(l, &block->ref_to_list, node_ref_to) { l 2562 fs/btrfs/check-integrity.c if (l->ref_cnt > 1) l 2563 fs/btrfs/check-integrity.c indent_add = sprintf(buf, " %d*--> ", l->ref_cnt); l 2575 fs/btrfs/check-integrity.c btrfsic_dump_tree_sub(state, l->block_ref_to, l 2588 fs/btrfs/check-integrity.c struct btrfsic_block_link *l; l 2590 fs/btrfs/check-integrity.c l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev, l 2595 fs/btrfs/check-integrity.c if (NULL == l) { l 2596 fs/btrfs/check-integrity.c l = btrfsic_block_link_alloc(); l 2597 fs/btrfs/check-integrity.c if (NULL == l) { l 2602 fs/btrfs/check-integrity.c l->block_ref_to = next_block; l 2603 fs/btrfs/check-integrity.c l->block_ref_from = from_block; l 2604 fs/btrfs/check-integrity.c l->ref_cnt = 1; l 2605 fs/btrfs/check-integrity.c l->parent_generation = parent_generation; l 2608 fs/btrfs/check-integrity.c btrfsic_print_add_link(state, l); l 2610 fs/btrfs/check-integrity.c list_add(&l->node_ref_to, &from_block->ref_to_list); l 2611 fs/btrfs/check-integrity.c list_add(&l->node_ref_from, &next_block->ref_from_list); l 2613 fs/btrfs/check-integrity.c btrfsic_block_link_hashtable_add(l, l 2616 fs/btrfs/check-integrity.c l->ref_cnt++; l 2617 fs/btrfs/check-integrity.c l->parent_generation = 
parent_generation; l 2619 fs/btrfs/check-integrity.c btrfsic_print_add_link(state, l); l 2622 fs/btrfs/check-integrity.c return l; l 3013 fs/btrfs/check-integrity.c struct btrfsic_block_link *l, *tmp; l 3015 fs/btrfs/check-integrity.c list_for_each_entry_safe(l, tmp, &b_all->ref_to_list, l 3018 fs/btrfs/check-integrity.c btrfsic_print_rem_link(state, l); l 3020 fs/btrfs/check-integrity.c l->ref_cnt--; l 3021 fs/btrfs/check-integrity.c if (0 == l->ref_cnt) l 3022 fs/btrfs/check-integrity.c btrfsic_block_link_free(l); l 3581 fs/btrfs/ctree.c static int leaf_space_used(struct extent_buffer *l, int start, int nr) l 3587 fs/btrfs/ctree.c int nritems = btrfs_header_nritems(l); l 3592 fs/btrfs/ctree.c btrfs_init_map_token(&token, l); l 3595 fs/btrfs/ctree.c data_len = btrfs_token_item_offset(l, start_item, &token) + l 3596 fs/btrfs/ctree.c btrfs_token_item_size(l, start_item, &token); l 3597 fs/btrfs/ctree.c data_len = data_len - btrfs_token_item_offset(l, end_item, &token); l 4083 fs/btrfs/ctree.c struct extent_buffer *l, l 4096 fs/btrfs/ctree.c data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l); l 4098 fs/btrfs/ctree.c copy_extent_buffer(right, l, btrfs_item_nr_offset(0), l 4102 fs/btrfs/ctree.c copy_extent_buffer(right, l, l 4105 fs/btrfs/ctree.c leaf_data_end(l), data_copy_size); l 4107 fs/btrfs/ctree.c rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid); l 4119 fs/btrfs/ctree.c btrfs_set_header_nritems(l, mid); l 4124 fs/btrfs/ctree.c btrfs_mark_buffer_dirty(l); l 4218 fs/btrfs/ctree.c struct extent_buffer *l; l 4230 fs/btrfs/ctree.c l = path->nodes[0]; l 4232 fs/btrfs/ctree.c if (extend && data_size + btrfs_item_size_nr(l, slot) + l 4240 fs/btrfs/ctree.c if (slot < btrfs_header_nritems(l)) l 4241 fs/btrfs/ctree.c space_needed -= btrfs_leaf_free_space(l); l 4250 fs/btrfs/ctree.c space_needed -= btrfs_leaf_free_space(l); l 4256 fs/btrfs/ctree.c l = path->nodes[0]; l 4259 fs/btrfs/ctree.c if (btrfs_leaf_free_space(l) >= data_size) l 4270 fs/btrfs/ctree.c l = path->nodes[0]; l 4272 fs/btrfs/ctree.c nritems = btrfs_header_nritems(l); l 4277 fs/btrfs/ctree.c leaf_space_used(l, mid, nritems - mid) + data_size > l 4284 fs/btrfs/ctree.c leaf_space_used(l, mid, nritems - mid) + l 4293 fs/btrfs/ctree.c if (leaf_space_used(l, 0, mid) + data_size > l 4302 fs/btrfs/ctree.c leaf_space_used(l, mid, nritems - mid) + l 4315 fs/btrfs/ctree.c btrfs_item_key(l, &disk_key, mid); l 4318 fs/btrfs/ctree.c l->start, 0); l 4353 fs/btrfs/ctree.c copy_for_split(trans, path, l, right, slot, mid, nritems); l 531 fs/btrfs/inode-map.c struct extent_buffer *l; l 549 fs/btrfs/inode-map.c l = path->nodes[0]; l 550 fs/btrfs/inode-map.c btrfs_item_key_to_cpu(l, &found_key, slot); l 3016 fs/btrfs/inode.c struct extent_buffer *l; l 3024 fs/btrfs/inode.c l = path->nodes[0]; l 3027 fs/btrfs/inode.c if (slot >= btrfs_header_nritems(l)) { l 3036 fs/btrfs/inode.c btrfs_item_key_to_cpu(l, &key, slot); l 3045 fs/btrfs/inode.c extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item); l 3047 fs/btrfs/inode.c num_bytes = btrfs_file_extent_num_bytes(l, extent); l 3051 fs/btrfs/inode.c disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent); l 3055 fs/btrfs/inode.c extent_offset = btrfs_file_extent_offset(l, extent); l 2301 fs/btrfs/ioctl.c struct extent_buffer *l; l 2343 fs/btrfs/ioctl.c l = path->nodes[0]; l 2345 fs/btrfs/ioctl.c btrfs_item_key_to_cpu(l, &key, slot); l 2347 fs/btrfs/ioctl.c iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); l 2348 fs/btrfs/ioctl.c len = btrfs_inode_ref_name_len(l, 
iref); l 2357 fs/btrfs/ioctl.c read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len); l 128 fs/btrfs/print-tree.c static void print_uuid_item(struct extent_buffer *l, unsigned long offset, l 139 fs/btrfs/print-tree.c read_extent_buffer(l, &subvol_id, offset, sizeof(subvol_id)); l 166 fs/btrfs/print-tree.c void btrfs_print_leaf(struct extent_buffer *l) l 183 fs/btrfs/print-tree.c if (!l) l 186 fs/btrfs/print-tree.c fs_info = l->fs_info; l 187 fs/btrfs/print-tree.c nr = btrfs_header_nritems(l); l 191 fs/btrfs/print-tree.c btrfs_header_bytenr(l), btrfs_header_generation(l), nr, l 192 fs/btrfs/print-tree.c btrfs_leaf_free_space(l), btrfs_header_owner(l)); l 193 fs/btrfs/print-tree.c print_eb_refs_lock(l); l 196 fs/btrfs/print-tree.c btrfs_item_key_to_cpu(l, &key, i); l 200 fs/btrfs/print-tree.c btrfs_item_offset(l, item), btrfs_item_size(l, item)); l 203 fs/btrfs/print-tree.c ii = btrfs_item_ptr(l, i, struct btrfs_inode_item); l 205 fs/btrfs/print-tree.c btrfs_inode_generation(l, ii), l 206 fs/btrfs/print-tree.c btrfs_inode_size(l, ii), l 207 fs/btrfs/print-tree.c btrfs_inode_mode(l, ii)); l 210 fs/btrfs/print-tree.c di = btrfs_item_ptr(l, i, struct btrfs_dir_item); l 211 fs/btrfs/print-tree.c btrfs_dir_item_key_to_cpu(l, di, &found_key); l 214 fs/btrfs/print-tree.c btrfs_dir_type(l, di)); l 217 fs/btrfs/print-tree.c ri = btrfs_item_ptr(l, i, struct btrfs_root_item); l 219 fs/btrfs/print-tree.c btrfs_disk_root_bytenr(l, ri), l 220 fs/btrfs/print-tree.c btrfs_disk_root_refs(l, ri)); l 224 fs/btrfs/print-tree.c print_extent_item(l, i, type); l 233 fs/btrfs/print-tree.c dref = btrfs_item_ptr(l, i, l 235 fs/btrfs/print-tree.c print_extent_data_ref(l, dref); l 238 fs/btrfs/print-tree.c sref = btrfs_item_ptr(l, i, l 241 fs/btrfs/print-tree.c btrfs_shared_data_ref_count(l, sref)); l 244 fs/btrfs/print-tree.c fi = btrfs_item_ptr(l, i, l 246 fs/btrfs/print-tree.c if (btrfs_file_extent_type(l, fi) == l 249 fs/btrfs/print-tree.c btrfs_file_extent_ram_bytes(l, fi)); l 253 fs/btrfs/print-tree.c btrfs_file_extent_disk_bytenr(l, fi), l 254 fs/btrfs/print-tree.c btrfs_file_extent_disk_num_bytes(l, fi)); l 256 fs/btrfs/print-tree.c btrfs_file_extent_offset(l, fi), l 257 fs/btrfs/print-tree.c btrfs_file_extent_num_bytes(l, fi), l 258 fs/btrfs/print-tree.c btrfs_file_extent_ram_bytes(l, fi)); l 265 fs/btrfs/print-tree.c bi = btrfs_item_ptr(l, i, l 269 fs/btrfs/print-tree.c btrfs_disk_block_group_used(l, bi), l 270 fs/btrfs/print-tree.c btrfs_disk_block_group_chunk_objectid(l, bi), l 271 fs/btrfs/print-tree.c btrfs_disk_block_group_flags(l, bi)); l 274 fs/btrfs/print-tree.c print_chunk(l, btrfs_item_ptr(l, i, l 278 fs/btrfs/print-tree.c print_dev_item(l, btrfs_item_ptr(l, i, l 282 fs/btrfs/print-tree.c dev_extent = btrfs_item_ptr(l, i, l 285 fs/btrfs/print-tree.c btrfs_dev_extent_chunk_tree(l, dev_extent), l 286 fs/btrfs/print-tree.c btrfs_dev_extent_chunk_objectid(l, dev_extent), l 287 fs/btrfs/print-tree.c btrfs_dev_extent_chunk_offset(l, dev_extent), l 288 fs/btrfs/print-tree.c btrfs_dev_extent_length(l, dev_extent)); l 317 fs/btrfs/print-tree.c print_uuid_item(l, btrfs_item_ptr_offset(l, i), l 318 fs/btrfs/print-tree.c btrfs_item_size_nr(l, i)); l 9 fs/btrfs/print-tree.h void btrfs_print_leaf(struct extent_buffer *l); l 333 fs/btrfs/qgroup.c struct extent_buffer *l; l 371 fs/btrfs/qgroup.c l = path->nodes[0]; l 372 fs/btrfs/qgroup.c btrfs_item_key_to_cpu(l, &found_key, slot); l 377 fs/btrfs/qgroup.c ptr = btrfs_item_ptr(l, slot, l 380 fs/btrfs/qgroup.c if (btrfs_qgroup_status_version(l, ptr) != l 386 
fs/btrfs/qgroup.c if (btrfs_qgroup_status_generation(l, ptr) != l 392 fs/btrfs/qgroup.c fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, l 394 fs/btrfs/qgroup.c rescan_progress = btrfs_qgroup_status_rescan(l, ptr); l 419 fs/btrfs/qgroup.c ptr = btrfs_item_ptr(l, slot, l 421 fs/btrfs/qgroup.c qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr); l 422 fs/btrfs/qgroup.c qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr); l 423 fs/btrfs/qgroup.c qgroup->excl = btrfs_qgroup_info_excl(l, ptr); l 424 fs/btrfs/qgroup.c qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr); l 431 fs/btrfs/qgroup.c ptr = btrfs_item_ptr(l, slot, l 433 fs/btrfs/qgroup.c qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr); l 434 fs/btrfs/qgroup.c qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr); l 435 fs/btrfs/qgroup.c qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr); l 436 fs/btrfs/qgroup.c qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr); l 437 fs/btrfs/qgroup.c qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr); l 461 fs/btrfs/qgroup.c l = path->nodes[0]; l 462 fs/btrfs/qgroup.c btrfs_item_key_to_cpu(l, &found_key, slot); l 708 fs/btrfs/qgroup.c struct extent_buffer *l; l 728 fs/btrfs/qgroup.c l = path->nodes[0]; l 730 fs/btrfs/qgroup.c qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item); l 731 fs/btrfs/qgroup.c btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags); l 732 fs/btrfs/qgroup.c btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer); l 733 fs/btrfs/qgroup.c btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl); l 734 fs/btrfs/qgroup.c btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer); l 735 fs/btrfs/qgroup.c btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl); l 737 fs/btrfs/qgroup.c btrfs_mark_buffer_dirty(l); l 751 fs/btrfs/qgroup.c struct extent_buffer *l; l 774 fs/btrfs/qgroup.c l = path->nodes[0]; l 776 fs/btrfs/qgroup.c qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item); l 777 fs/btrfs/qgroup.c btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid); l 778 fs/btrfs/qgroup.c btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer); l 779 fs/btrfs/qgroup.c btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr); l 780 fs/btrfs/qgroup.c btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl); l 781 fs/btrfs/qgroup.c btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr); l 783 fs/btrfs/qgroup.c btrfs_mark_buffer_dirty(l); l 796 fs/btrfs/qgroup.c struct extent_buffer *l; l 816 fs/btrfs/qgroup.c l = path->nodes[0]; l 818 fs/btrfs/qgroup.c ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item); l 819 fs/btrfs/qgroup.c btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags); l 820 fs/btrfs/qgroup.c btrfs_set_qgroup_status_generation(l, ptr, trans->transid); l 821 fs/btrfs/qgroup.c btrfs_set_qgroup_status_rescan(l, ptr, l 824 fs/btrfs/qgroup.c btrfs_mark_buffer_dirty(l); l 593 fs/btrfs/ref-verify.c int l; l 595 fs/btrfs/ref-verify.c for (l = 0; l < BTRFS_MAX_LEVEL; l++) { l 596 fs/btrfs/ref-verify.c if (!path->nodes[l]) l 598 fs/btrfs/ref-verify.c if (l) { l 599 fs/btrfs/ref-verify.c path->slots[l]++; l 600 fs/btrfs/ref-verify.c if (path->slots[l] < l 601 fs/btrfs/ref-verify.c btrfs_header_nritems(path->nodes[l])) { l 602 fs/btrfs/ref-verify.c *level = l; l 606 fs/btrfs/ref-verify.c btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]); l 607 fs/btrfs/ref-verify.c free_extent_buffer(path->nodes[l]); l 608 fs/btrfs/ref-verify.c 
path->nodes[l] = NULL; l 609 fs/btrfs/ref-verify.c path->slots[l] = 0; l 610 fs/btrfs/ref-verify.c path->locks[l] = 0; l 71 fs/btrfs/root-tree.c struct extent_buffer *l; l 90 fs/btrfs/root-tree.c l = path->nodes[0]; l 93 fs/btrfs/root-tree.c btrfs_item_key_to_cpu(l, &found_key, slot); l 101 fs/btrfs/root-tree.c btrfs_read_root_item(l, slot, root_item); l 126 fs/btrfs/root-tree.c struct extent_buffer *l; l 150 fs/btrfs/root-tree.c l = path->nodes[0]; l 152 fs/btrfs/root-tree.c ptr = btrfs_item_ptr_offset(l, slot); l 153 fs/btrfs/root-tree.c old_len = btrfs_item_size_nr(l, slot); l 181 fs/btrfs/root-tree.c l = path->nodes[0]; l 183 fs/btrfs/root-tree.c ptr = btrfs_item_ptr_offset(l, slot); l 192 fs/btrfs/root-tree.c write_extent_buffer(l, item, ptr, sizeof(*item)); l 1814 fs/btrfs/scrub.c u64 l = min_t(u64, len, PAGE_SIZE); l 1816 fs/btrfs/scrub.c crypto_shash_update(shash, buffer, l); l 1818 fs/btrfs/scrub.c len -= l; l 1884 fs/btrfs/scrub.c u64 l = min_t(u64, len, mapped_size); l 1886 fs/btrfs/scrub.c crypto_shash_update(shash, p, l); l 1888 fs/btrfs/scrub.c len -= l; l 1947 fs/btrfs/scrub.c u64 l = min_t(u64, len, mapped_size); l 1949 fs/btrfs/scrub.c crypto_shash_update(shash, p, l); l 1951 fs/btrfs/scrub.c len -= l; l 2245 fs/btrfs/scrub.c u64 l = min_t(u64, len, PAGE_SIZE); l 2277 fs/btrfs/scrub.c len -= l; l 2278 fs/btrfs/scrub.c logical += l; l 2279 fs/btrfs/scrub.c physical += l; l 2280 fs/btrfs/scrub.c physical_for_dev_replace += l; l 2503 fs/btrfs/scrub.c u64 l = min_t(u64, len, blocksize); l 2512 fs/btrfs/scrub.c ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, l 2517 fs/btrfs/scrub.c len -= l; l 2518 fs/btrfs/scrub.c logical += l; l 2519 fs/btrfs/scrub.c physical += l; l 2520 fs/btrfs/scrub.c physical_for_dev_replace += l; l 2552 fs/btrfs/scrub.c u64 l = min_t(u64, len, PAGE_SIZE); l 2587 fs/btrfs/scrub.c len -= l; l 2588 fs/btrfs/scrub.c logical += l; l 2589 fs/btrfs/scrub.c physical += l; l 2634 fs/btrfs/scrub.c u64 l = min_t(u64, len, blocksize); l 2643 fs/btrfs/scrub.c ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, l 2649 fs/btrfs/scrub.c len -= l; l 2650 fs/btrfs/scrub.c logical += l; l 2651 fs/btrfs/scrub.c physical += l; l 2833 fs/btrfs/scrub.c struct extent_buffer *l; l 2899 fs/btrfs/scrub.c l = path->nodes[0]; l 2901 fs/btrfs/scrub.c if (slot >= btrfs_header_nritems(l)) { l 2911 fs/btrfs/scrub.c btrfs_item_key_to_cpu(l, &key, slot); l 2933 fs/btrfs/scrub.c extent = btrfs_item_ptr(l, slot, l 2935 fs/btrfs/scrub.c flags = btrfs_extent_flags(l, extent); l 2936 fs/btrfs/scrub.c generation = btrfs_extent_generation(l, extent); l 3058 fs/btrfs/scrub.c struct extent_buffer *l; l 3251 fs/btrfs/scrub.c l = path->nodes[0]; l 3253 fs/btrfs/scrub.c if (slot >= btrfs_header_nritems(l)) { l 3263 fs/btrfs/scrub.c btrfs_item_key_to_cpu(l, &key, slot); l 3284 fs/btrfs/scrub.c extent = btrfs_item_ptr(l, slot, l 3286 fs/btrfs/scrub.c flags = btrfs_extent_flags(l, extent); l 3287 fs/btrfs/scrub.c generation = btrfs_extent_generation(l, extent); l 3481 fs/btrfs/scrub.c struct extent_buffer *l; l 3518 fs/btrfs/scrub.c l = path->nodes[0]; l 3521 fs/btrfs/scrub.c btrfs_item_key_to_cpu(l, &found_key, slot); l 3535 fs/btrfs/scrub.c dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); l 3536 fs/btrfs/scrub.c length = btrfs_dev_extent_length(l, dev_extent); l 3541 fs/btrfs/scrub.c chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); l 1613 fs/btrfs/volumes.c struct extent_buffer *l; l 1654 fs/btrfs/volumes.c l = path->nodes[0]; l 1656 
fs/btrfs/volumes.c if (slot >= btrfs_header_nritems(l)) { l 1665 fs/btrfs/volumes.c btrfs_item_key_to_cpu(l, &key, slot); l 1711 fs/btrfs/volumes.c dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); l 1712 fs/btrfs/volumes.c extent_end = key.offset + btrfs_dev_extent_length(l, l 4728 fs/btrfs/volumes.c struct extent_buffer *l; l 4802 fs/btrfs/volumes.c l = path->nodes[0]; l 4804 fs/btrfs/volumes.c btrfs_item_key_to_cpu(l, &key, path->slots[0]); l 4812 fs/btrfs/volumes.c dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); l 4813 fs/btrfs/volumes.c length = btrfs_dev_extent_length(l, dev_extent); l 4821 fs/btrfs/volumes.c chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); l 5572 fs/btrfs/volumes.c u64 l; l 5581 fs/btrfs/volumes.c l = bbio->raid_map[i]; l 5585 fs/btrfs/volumes.c bbio->raid_map[i+1] = l; l 93 fs/ceph/dir.c static int fpos_cmp(loff_t l, loff_t r) l 95 fs/ceph/dir.c int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r)); l 98 fs/ceph/dir.c return (int)(fpos_off(l) - fpos_off(r)); l 302 fs/ceph/inode.c static int frag_tree_split_cmp(const void *l, const void *r) l 304 fs/ceph/inode.c struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; l 20 fs/ceph/ioctl.c struct ceph_ioctl_layout l; l 25 fs/ceph/ioctl.c l.stripe_unit = ci->i_layout.stripe_unit; l 26 fs/ceph/ioctl.c l.stripe_count = ci->i_layout.stripe_count; l 27 fs/ceph/ioctl.c l.object_size = ci->i_layout.object_size; l 28 fs/ceph/ioctl.c l.data_pool = ci->i_layout.pool_id; l 29 fs/ceph/ioctl.c l.preferred_osd = -1; l 30 fs/ceph/ioctl.c if (copy_to_user(arg, &l, sizeof(l))) l 38 fs/ceph/ioctl.c struct ceph_ioctl_layout *l) l 43 fs/ceph/ioctl.c if ((l->object_size & ~PAGE_MASK) || l 44 fs/ceph/ioctl.c (l->stripe_unit & ~PAGE_MASK) || l 45 fs/ceph/ioctl.c ((unsigned)l->stripe_unit != 0 && l 46 fs/ceph/ioctl.c ((unsigned)l->object_size % (unsigned)l->stripe_unit))) l 53 fs/ceph/ioctl.c if (mdsc->mdsmap->m_data_pg_pools[i] == l->data_pool) { l 69 fs/ceph/ioctl.c struct ceph_ioctl_layout l; l 74 fs/ceph/ioctl.c if (copy_from_user(&l, arg, sizeof(l))) l 83 fs/ceph/ioctl.c if (l.stripe_count) l 84 fs/ceph/ioctl.c nl.stripe_count = l.stripe_count; l 87 fs/ceph/ioctl.c if (l.stripe_unit) l 88 fs/ceph/ioctl.c nl.stripe_unit = l.stripe_unit; l 91 fs/ceph/ioctl.c if (l.object_size) l 92 fs/ceph/ioctl.c nl.object_size = l.object_size; l 95 fs/ceph/ioctl.c if (l.data_pool) l 96 fs/ceph/ioctl.c nl.data_pool = l.data_pool; l 118 fs/ceph/ioctl.c cpu_to_le32(l.stripe_unit); l 120 fs/ceph/ioctl.c cpu_to_le32(l.stripe_count); l 122 fs/ceph/ioctl.c cpu_to_le32(l.object_size); l 123 fs/ceph/ioctl.c req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); l 140 fs/ceph/ioctl.c struct ceph_ioctl_layout l; l 145 fs/ceph/ioctl.c if (copy_from_user(&l, arg, sizeof(l))) l 148 fs/ceph/ioctl.c err = __validate_layout(mdsc, &l); l 162 fs/ceph/ioctl.c cpu_to_le32(l.stripe_unit); l 164 fs/ceph/ioctl.c cpu_to_le32(l.stripe_count); l 166 fs/ceph/ioctl.c cpu_to_le32(l.object_size); l 168 fs/ceph/ioctl.c cpu_to_le32(l.data_pool); l 398 fs/ceph/locks.c int l = 0; l 413 fs/ceph/locks.c err = lock_to_ceph_filelock(lock, &flocks[l]); l 416 fs/ceph/locks.c ++l; l 424 fs/ceph/locks.c err = lock_to_ceph_filelock(lock, &flocks[l]); l 427 fs/ceph/locks.c ++l; l 147 fs/cifs/dfs_cache.c struct hlist_head *l = &dfs_cache_htable[i]; l 150 fs/cifs/dfs_cache.c hlist_for_each_entry_rcu(ce, l, ce_hlist) l 20 fs/dlm/memory.h void dlm_free_lkb(struct dlm_lkb *l); l 22 fs/dlm/memory.h void dlm_free_lvb(char *l); l 153 
fs/ext2/namei.c unsigned l = strlen(symname)+1; l 156 fs/ext2/namei.c if (l > sb->s_blocksize) l 168 fs/ext2/namei.c if (l > sizeof (EXT2_I(inode)->i_data)) { l 176 fs/ext2/namei.c err = page_symlink(inode, symname, l); l 183 fs/ext2/namei.c memcpy(inode->i_link, symname, l); l 184 fs/ext2/namei.c inode->i_size = l-1; l 640 fs/ext4/extents.c int k, l = path->p_depth; l 643 fs/ext4/extents.c for (k = 0; k <= l; k++, path++) { l 744 fs/ext4/extents.c struct ext4_extent_idx *r, *l, *m; l 749 fs/ext4/extents.c l = EXT_FIRST_INDEX(eh) + 1; l 751 fs/ext4/extents.c while (l <= r) { l 752 fs/ext4/extents.c m = l + (r - l) / 2; l 756 fs/ext4/extents.c l = m + 1; l 757 fs/ext4/extents.c ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), l 762 fs/ext4/extents.c path->p_idx = l - 1; l 804 fs/ext4/extents.c struct ext4_extent *r, *l, *m; l 816 fs/ext4/extents.c l = EXT_FIRST_EXTENT(eh) + 1; l 819 fs/ext4/extents.c while (l <= r) { l 820 fs/ext4/extents.c m = l + (r - l) / 2; l 824 fs/ext4/extents.c l = m + 1; l 825 fs/ext4/extents.c ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), l 830 fs/ext4/extents.c path->p_ext = l - 1; l 918 fs/ext4/super.c static inline struct inode *orphan_list_entry(struct list_head *l) l 920 fs/ext4/super.c return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode; l 925 fs/ext4/super.c struct list_head *l; l 931 fs/ext4/super.c list_for_each(l, &sbi->s_orphan) { l 932 fs/ext4/super.c struct inode *inode = orphan_list_entry(l); l 351 fs/gfs2/xattr.c unsigned int l; l 359 fs/gfs2/xattr.c l = 5; l 363 fs/gfs2/xattr.c l = 7; l 367 fs/gfs2/xattr.c l = 9; l 373 fs/gfs2/xattr.c ea_size = l + ea->ea_name_len + 1; l 378 fs/gfs2/xattr.c memcpy(er->er_data + ei->ei_size, prefix, l); l 379 fs/gfs2/xattr.c memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea), l 25 fs/hfsplus/bnode.c int l; l 31 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE - off); l 32 fs/hfsplus/bnode.c memcpy(buf, kmap(*pagep) + off, l); l 35 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 36 fs/hfsplus/bnode.c buf += l; l 37 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE); l 38 fs/hfsplus/bnode.c memcpy(buf, kmap(*++pagep), l); l 78 fs/hfsplus/bnode.c int l; l 84 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE - off); l 85 fs/hfsplus/bnode.c memcpy(kmap(*pagep) + off, buf, l); l 89 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 90 fs/hfsplus/bnode.c buf += l; l 91 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE); l 92 fs/hfsplus/bnode.c memcpy(kmap(*++pagep), buf, l); l 108 fs/hfsplus/bnode.c int l; l 114 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE - off); l 115 fs/hfsplus/bnode.c memset(kmap(*pagep) + off, 0, l); l 119 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 120 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE); l 121 fs/hfsplus/bnode.c memset(kmap(*++pagep), 0, l); l 131 fs/hfsplus/bnode.c int l; l 144 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE - src); l 145 fs/hfsplus/bnode.c memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l); l 150 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 151 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE); l 152 fs/hfsplus/bnode.c memcpy(kmap(*++dst_page), kmap(*++src_page), l); l 164 fs/hfsplus/bnode.c l = PAGE_SIZE - src; l 166 fs/hfsplus/bnode.c dst += l; l 168 fs/hfsplus/bnode.c l = PAGE_SIZE - dst; l 169 fs/hfsplus/bnode.c src += l; l 172 fs/hfsplus/bnode.c l = min(len, l); l 173 fs/hfsplus/bnode.c memcpy(dst_ptr, src_ptr, l); l 181 fs/hfsplus/bnode.c } while ((len -= l)); l 188 fs/hfsplus/bnode.c int l; l 227 
fs/hfsplus/bnode.c l = src; l 229 fs/hfsplus/bnode.c dst -= l; l 231 fs/hfsplus/bnode.c l = dst; l 232 fs/hfsplus/bnode.c src -= l; l 235 fs/hfsplus/bnode.c l = min(len, l); l 236 fs/hfsplus/bnode.c memmove(dst_ptr - l, src_ptr - l, l); l 244 fs/hfsplus/bnode.c } while ((len -= l)); l 253 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE - src); l 255 fs/hfsplus/bnode.c kmap(*src_page) + src, l); l 260 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 261 fs/hfsplus/bnode.c l = min_t(int, len, PAGE_SIZE); l 263 fs/hfsplus/bnode.c kmap(*++src_page), l); l 276 fs/hfsplus/bnode.c l = PAGE_SIZE - src; l 278 fs/hfsplus/bnode.c dst += l; l 280 fs/hfsplus/bnode.c l = PAGE_SIZE - dst; l 281 fs/hfsplus/bnode.c src += l; l 284 fs/hfsplus/bnode.c l = min(len, l); l 285 fs/hfsplus/bnode.c memmove(dst_ptr, src_ptr, l); l 293 fs/hfsplus/bnode.c } while ((len -= l)); l 312 fs/hfsplus/unicode.c int l, v, t; l 318 fs/hfsplus/unicode.c l = Hangul_LBase + index / Hangul_NCount; l 322 fs/hfsplus/unicode.c result[0] = l; l 344 fs/hpfs/anode.c unsigned l; l 353 fs/hpfs/anode.c l = 0x200 - (pos & 0x1ff); if (l > len) l = len; l 354 fs/hpfs/anode.c memcpy(buf, data + (pos & 0x1ff), l); l 356 fs/hpfs/anode.c buf += l; pos += l; len -= l; l 367 fs/hpfs/anode.c unsigned l; l 376 fs/hpfs/anode.c l = 0x200 - (pos & 0x1ff); if (l > len) l = len; l 377 fs/hpfs/anode.c memcpy(data + (pos & 0x1ff), buf, l); l 380 fs/hpfs/anode.c buf += l; pos += l; len -= l; l 20 fs/hpfs/dentry.c unsigned l = qstr->len; l 22 fs/hpfs/dentry.c if (l == 1) if (qstr->name[0]=='.') goto x; l 23 fs/hpfs/dentry.c if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x; l 24 fs/hpfs/dentry.c hpfs_adjust_length(qstr->name, &l); l 31 fs/hpfs/dentry.c for (i = 0; i < l; i++) l 79 fs/hpfs/name.c unsigned l = l1 < l2 ? l1 : l2; l 82 fs/hpfs/name.c for (i = 0; i < l; i++) { l 855 fs/hugetlbfs/inode.c int l = strlen(symname)+1; l 856 fs/hugetlbfs/inode.c error = page_symlink(inode, symname, l); l 1117 fs/jffs2/scan.c static int count_list(struct list_head *l) l 1122 fs/jffs2/scan.c list_for_each(tmp, l) { l 109 fs/jfs/jfs_dmap.h #define BLKTOCTL(b,s,l) \ l 110 fs/jfs/jfs_dmap.h (((l) == 2) ? 1 : ((l) == 1) ? 
BLKTOL1((b),(s)) : BLKTOL0((b),(s))) l 519 fs/locks.c struct flock64 *l) l 521 fs/locks.c switch (l->l_whence) { l 534 fs/locks.c if (l->l_start > OFFSET_MAX - fl->fl_start) l 536 fs/locks.c fl->fl_start += l->l_start; l 542 fs/locks.c if (l->l_len > 0) { l 543 fs/locks.c if (l->l_len - 1 > OFFSET_MAX - fl->fl_start) l 545 fs/locks.c fl->fl_end = fl->fl_start + l->l_len - 1; l 547 fs/locks.c } else if (l->l_len < 0) { l 548 fs/locks.c if (fl->fl_start + l->l_len < 0) l 551 fs/locks.c fl->fl_start += l->l_len; l 562 fs/locks.c return assign_type(fl, l->l_type); l 569 fs/locks.c struct flock *l) l 572 fs/locks.c .l_type = l->l_type, l 573 fs/locks.c .l_whence = l->l_whence, l 574 fs/locks.c .l_start = l->l_start, l 575 fs/locks.c .l_len = l->l_len, l 121 fs/minix/dir.c unsigned l = strnlen(name, sbi->s_namelen); l 122 fs/minix/dir.c if (!dir_emit(ctx, name, l, l 147 fs/nfs/nfs4proc.c struct iattr *sattr, struct nfs4_label *l) l 3700 fs/nfs/nfs4proc.c struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; l 3702 fs/nfs/nfs4proc.c label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); l 4455 fs/nfs/nfs4proc.c struct nfs4_label l, *ilabel = NULL; l 4464 fs/nfs/nfs4proc.c ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); l 4808 fs/nfs/nfs4proc.c struct nfs4_label l, *label = NULL; l 4811 fs/nfs/nfs4proc.c label = nfs4_label_init_security(dir, dentry, sattr, &l); l 4849 fs/nfs/nfs4proc.c struct nfs4_label l, *label = NULL; l 4852 fs/nfs/nfs4proc.c label = nfs4_label_init_security(dir, dentry, sattr, &l); l 4964 fs/nfs/nfs4proc.c struct nfs4_label l, *label = NULL; l 4967 fs/nfs/nfs4proc.c label = nfs4_label_init_security(dir, dentry, sattr, &l); l 386 fs/nfsd/nfs4layouts.c struct nfs4_layout_stateid *l, *n; l 391 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) { l 392 fs/nfsd/nfs4layouts.c if (l != ls) { l 393 fs/nfsd/nfs4layouts.c nfsd4_recall_file_layout(l); l 298 fs/nfsd/nfsd.h #define isdotent(n, l) (l < 3 && n[0] == '.' 
&& (l == 1 || n[1] == '.')) l 132 fs/nilfs2/namei.c unsigned int l = strlen(symname) + 1; l 136 fs/nilfs2/namei.c if (l > sb->s_blocksize) l 152 fs/nilfs2/namei.c err = page_symlink(inode, symname, l); l 57 fs/nls/nls_base.c unsigned long l; l 63 fs/nls/nls_base.c l = c0; l 67 fs/nls/nls_base.c l &= t->lmask; l 68 fs/nls/nls_base.c if (l < t->lval || l > UNICODE_MAX || l 69 fs/nls/nls_base.c (l & SURROGATE_MASK) == SURROGATE_PAIR) l 71 fs/nls/nls_base.c *pu = (unicode_t) l; l 80 fs/nls/nls_base.c l = (l << 6) | c; l 88 fs/nls/nls_base.c unsigned long l; l 95 fs/nls/nls_base.c l = u; l 96 fs/nls/nls_base.c if (l > UNICODE_MAX || (l & SURROGATE_MASK) == SURROGATE_PAIR) l 102 fs/nls/nls_base.c if (l <= t->lmask) { l 104 fs/nls/nls_base.c *s = (u8) (t->cval | (l >> c)); l 108 fs/nls/nls_base.c *s = (u8) (0x80 | ((l >> c) & 0x3F)); l 17 fs/nls/nls_euc-jp.c #define IS_SJIS_LOW_BYTE(l) ((0x40 <= (l)) && ((l) <= 0xFC) && ((l) != 0x7F)) l 19 fs/nls/nls_euc-jp.c #define IS_SJIS_JISX0208(h, l) ((((0x81 <= (h)) && ((h) <= 0x9F)) \ l 21 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 23 fs/nls/nls_euc-jp.c #define IS_SJIS_UDC_LOW(h, l) (((0xF0 <= (h)) && ((h) <= 0xF4)) \ l 24 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 25 fs/nls/nls_euc-jp.c #define IS_SJIS_UDC_HI(h, l) (((0xF5 <= (h)) && ((h) <= 0xF9)) \ l 26 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 27 fs/nls/nls_euc-jp.c #define IS_SJIS_IBM(h, l) (((0xFA <= (h)) && ((h) <= 0xFC)) \ l 28 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 29 fs/nls/nls_euc-jp.c #define IS_SJIS_NECIBM(h, l) (((0xED <= (h)) && ((h) <= 0xEE)) \ l 30 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 44 fs/nls/nls_euc-jp.c #define IS_EUC_JISX0208(h, l) (IS_EUC_BYTE(h) && IS_EUC_BYTE(l)) l 45 fs/nls/nls_euc-jp.c #define IS_EUC_JISX0201KANA(h, l) (((h) == SS2) && (0xA1 <= (l) && (l) <= 0xDF)) l 46 fs/nls/nls_euc-jp.c #define IS_EUC_UDC_LOW(h, l) (((0xF5 <= (h)) && ((h) <= 0xFE)) \ l 47 fs/nls/nls_euc-jp.c && IS_EUC_BYTE(l)) l 48 fs/nls/nls_euc-jp.c #define IS_EUC_UDC_HI(h, l) IS_EUC_UDC_LOW(h, l) /* G3 block */ l 141 fs/nls/nls_euc-jp.c #define IS_EUC_IBM2JISX0208(h, l) \ l 142 fs/nls/nls_euc-jp.c (((h) == 0xA2 && (l) == 0xCC) || ((h) == 0xA2 && (l) == 0xE8)) l 1072 fs/ntfs/runlist.c s64 l = n; l 1078 fs/ntfs/runlist.c l >>= 8; l 1080 fs/ntfs/runlist.c } while (l != 0 && l != -1); l 1241 fs/ntfs/runlist.c s64 l = n; l 1249 fs/ntfs/runlist.c *dst++ = l & 0xffll; l 1250 fs/ntfs/runlist.c l >>= 8; l 1252 fs/ntfs/runlist.c } while (l != 0 && l != -1); l 963 fs/ocfs2/dlm/dlmdomain.c char *l, *r; l 995 fs/ocfs2/dlm/dlmdomain.c l = local; l 1000 fs/ocfs2/dlm/dlmdomain.c if (!memcmp(l, r, O2HB_MAX_REGION_NAME_LEN)) { l 1010 fs/ocfs2/dlm/dlmdomain.c qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, l, l 1014 fs/ocfs2/dlm/dlmdomain.c l += O2HB_MAX_REGION_NAME_LEN; l 1021 fs/ocfs2/dlm/dlmdomain.c l = local; l 1023 fs/ocfs2/dlm/dlmdomain.c if (!memcmp(r, l, O2HB_MAX_REGION_NAME_LEN)) { l 1027 fs/ocfs2/dlm/dlmdomain.c l += O2HB_MAX_REGION_NAME_LEN; l 1785 fs/ocfs2/namei.c int status, l, credits; l 1821 fs/ocfs2/namei.c l = strlen(symname) + 1; l 1890 fs/ocfs2/namei.c if (l > ocfs2_fast_symlink_chars(sb)) l 1932 fs/ocfs2/namei.c newsize = l - 1; l 1935 fs/ocfs2/namei.c if (l > ocfs2_fast_symlink_chars(sb)) { l 1962 fs/ocfs2/namei.c memcpy((char *) fe->id2.i_symlink, symname, l); l 1371 fs/ocfs2/refcounttree.c const struct ocfs2_refcount_rec *l = a, *r = b; l 1372 fs/ocfs2/refcounttree.c u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l); l 1384 fs/ocfs2/refcounttree.c const struct ocfs2_refcount_rec *l = a, *r 
= b; l 1385 fs/ocfs2/refcounttree.c u64 l_cpos = le64_to_cpu(l->r_cpos); l 1397 fs/ocfs2/refcounttree.c struct ocfs2_refcount_rec *l = a, *r = b; l 1399 fs/ocfs2/refcounttree.c swap(*l, *r); l 4156 fs/ocfs2/xattr.c const struct ocfs2_xattr_entry *l = a, *r = b; l 4157 fs/ocfs2/xattr.c u32 l_hash = le32_to_cpu(l->xe_name_hash); l 4169 fs/ocfs2/xattr.c struct ocfs2_xattr_entry *l = a, *r = b, tmp; l 4171 fs/ocfs2/xattr.c tmp = *l; l 4172 fs/ocfs2/xattr.c memcpy(l, r, sizeof(struct ocfs2_xattr_entry)); l 4368 fs/ocfs2/xattr.c const struct ocfs2_xattr_entry *l = a, *r = b; l 4369 fs/ocfs2/xattr.c u32 l_name_offset = le16_to_cpu(l->xe_name_offset); l 138 fs/ramfs/inode.c int l = strlen(symname)+1; l 139 fs/ramfs/inode.c error = page_symlink(inode, symname, l); l 917 fs/reiserfs/fix_node.c struct buffer_head *l, *f; l 921 fs/reiserfs/fix_node.c (l = tb->FL[h]) == NULL) l 924 fs/reiserfs/fix_node.c if (f == l) l 927 fs/reiserfs/fix_node.c order = B_NR_ITEMS(l); l 928 fs/reiserfs/fix_node.c f = l; l 499 fs/reiserfs/item_ops.c int k, l; l 501 fs/reiserfs/item_ops.c l = 0; l 503 fs/reiserfs/item_ops.c l += dir_u->entry_sizes[k]; l 505 fs/reiserfs/item_ops.c if (l + IH_SIZE != vi->vi_item_len + l 793 fs/reiserfs/journal.c #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list) l 72 fs/sysv/namei.c int l = strlen(symname)+1; l 75 fs/sysv/namei.c if (l > dir->i_sb->s_blocksize) l 84 fs/sysv/namei.c err = page_symlink(inode, symname, l); l 1044 fs/ubifs/io.c int err, l; l 1069 fs/ubifs/io.c l = le32_to_cpu(ch->len); l 1070 fs/ubifs/io.c if (l != len) { l 1071 fs/ubifs/io.c ubifs_errc(c, "bad node length %d, expected %d", l, len); l 158 fs/udf/partition.c int i, j, k, l; l 228 fs/udf/partition.c for (l = k; l < reallocationTableLen; l++) { l 229 fs/udf/partition.c struct sparingEntry *entry = &st->mapEntry[l]; l 241 fs/udf/partition.c mapEntry = st->mapEntry[l]; l 246 fs/udf/partition.c (l - k) * l 112 fs/ufs/namei.c unsigned l = strlen(symname)+1; l 115 fs/ufs/namei.c if (l > sb->s_blocksize) l 123 fs/ufs/namei.c if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { l 128 fs/ufs/namei.c err = page_symlink(inode, symname, l); l 135 fs/ufs/namei.c memcpy(inode->i_link, symname, l); l 136 fs/ufs/namei.c inode->i_size = l-1; l 1374 fs/unicode/mkutf8data.c static int nfdi_equal(void *l, void *r) l 1376 fs/unicode/mkutf8data.c struct unicode_data *left = l; l 1391 fs/unicode/mkutf8data.c static int nfdicf_equal(void *l, void *r) l 1393 fs/unicode/mkutf8data.c struct unicode_data *left = l; l 1415 fs/unicode/mkutf8data.c static void nfdi_print(void *l, int indent) l 1417 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1430 fs/unicode/mkutf8data.c static void nfdicf_print(void *l, int indent) l 1432 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1446 fs/unicode/mkutf8data.c static int nfdi_mark(void *l) l 1451 fs/unicode/mkutf8data.c static int nfdicf_mark(void *l) l 1453 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1460 fs/unicode/mkutf8data.c static int correction_mark(void *l) l 1462 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1467 fs/unicode/mkutf8data.c static int nfdi_size(void *l) l 1469 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1479 fs/unicode/mkutf8data.c static int nfdicf_size(void *l) l 1481 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1493 fs/unicode/mkutf8data.c static int *nfdi_index(struct tree *tree, void *l) l 1495 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1500 fs/unicode/mkutf8data.c static int 
*nfdicf_index(struct tree *tree, void *l) l 1502 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1507 fs/unicode/mkutf8data.c static unsigned char *nfdi_emit(void *l, unsigned char *data) l 1509 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 1528 fs/unicode/mkutf8data.c static unsigned char *nfdicf_emit(void *l, unsigned char *data) l 1530 fs/unicode/mkutf8data.c struct unicode_data *leaf = l; l 2114 fs/xfs/libxfs/xfs_alloc.c uint32_t l = be32_to_cpu(agf->agf_fllast); l 2128 fs/xfs/libxfs/xfs_alloc.c if (f >= agfl_size || l >= agfl_size) l 2137 fs/xfs/libxfs/xfs_alloc.c if (c && l >= f) l 2138 fs/xfs/libxfs/xfs_alloc.c active = l - f + 1; l 2140 fs/xfs/libxfs/xfs_alloc.c active = agfl_size - f + l + 1; l 413 fs/xfs/libxfs/xfs_bmap.c nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); l 1245 fs/xfs/libxfs/xfs_bmap.c nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); l 141 fs/xfs/libxfs/xfs_bmap_btree.c ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, l 143 fs/xfs/libxfs/xfs_bmap_btree.c ASSERT(rblock->bb_u.l.bb_blkno == l 147 fs/xfs/libxfs/xfs_bmap_btree.c ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)); l 148 fs/xfs/libxfs/xfs_bmap_btree.c ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)); l 212 fs/xfs/libxfs/xfs_bmap_btree.c args.fsbno = be64_to_cpu(start->l); l 268 fs/xfs/libxfs/xfs_bmap_btree.c new->l = cpu_to_be64(args.fsbno); l 385 fs/xfs/libxfs/xfs_bmap_btree.c ptr->l = 0; l 68 fs/xfs/libxfs/xfs_btree.c if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)) l 70 fs/xfs/libxfs/xfs_btree.c if (block->bb_u.l.bb_blkno != l 73 fs/xfs/libxfs/xfs_btree.c if (block->bb_u.l.bb_pad != cpu_to_be32(0)) l 84 fs/xfs/libxfs/xfs_btree.c if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) && l 85 fs/xfs/libxfs/xfs_btree.c !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib), l 88 fs/xfs/libxfs/xfs_btree.c if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) && l 89 fs/xfs/libxfs/xfs_btree.c !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib), l 234 fs/xfs/libxfs/xfs_btree.c if (xfs_btree_check_lptr(cur, be64_to_cpu((&ptr->l)[index]), l 279 fs/xfs/libxfs/xfs_btree.c block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); l 291 fs/xfs/libxfs/xfs_btree.c if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn))) l 733 fs/xfs/libxfs/xfs_btree.c return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK); l 911 fs/xfs/libxfs/xfs_btree.c xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); l 912 fs/xfs/libxfs/xfs_btree.c xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); l 1001 fs/xfs/libxfs/xfs_btree.c fsbno = be64_to_cpu(ptr->l); l 1051 fs/xfs/libxfs/xfs_btree.c if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)) l 1053 fs/xfs/libxfs/xfs_btree.c if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)) l 1069 fs/xfs/libxfs/xfs_btree.c return ptr->l == cpu_to_be64(NULLFSBLOCK); l 1080 fs/xfs/libxfs/xfs_btree.c ptr->l = cpu_to_be64(NULLFSBLOCK); l 1099 fs/xfs/libxfs/xfs_btree.c ptr->l = block->bb_u.l.bb_rightsib; l 1101 fs/xfs/libxfs/xfs_btree.c ptr->l = block->bb_u.l.bb_leftsib; l 1121 fs/xfs/libxfs/xfs_btree.c block->bb_u.l.bb_rightsib = ptr->l; l 1123 fs/xfs/libxfs/xfs_btree.c block->bb_u.l.bb_leftsib = ptr->l; l 1151 fs/xfs/libxfs/xfs_btree.c buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK); l 1152 fs/xfs/libxfs/xfs_btree.c buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK); l 1154 fs/xfs/libxfs/xfs_btree.c buf->bb_u.l.bb_blkno = cpu_to_be64(blkno); l 1155 fs/xfs/libxfs/xfs_btree.c 
buf->bb_u.l.bb_owner = cpu_to_be64(owner); l 1156 fs/xfs/libxfs/xfs_btree.c uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid); l 1157 fs/xfs/libxfs/xfs_btree.c buf->bb_u.l.bb_pad = 0; l 1158 fs/xfs/libxfs/xfs_btree.c buf->bb_u.l.bb_lsn = 0; l 1244 fs/xfs/libxfs/xfs_btree.c ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, l 1531 fs/xfs/libxfs/xfs_btree.c offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib), l 1532 fs/xfs/libxfs/xfs_btree.c offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib), l 1533 fs/xfs/libxfs/xfs_btree.c offsetof(struct xfs_btree_block, bb_u.l.bb_blkno), l 1534 fs/xfs/libxfs/xfs_btree.c offsetof(struct xfs_btree_block, bb_u.l.bb_lsn), l 1535 fs/xfs/libxfs/xfs_btree.c offsetof(struct xfs_btree_block, bb_u.l.bb_uuid), l 1536 fs/xfs/libxfs/xfs_btree.c offsetof(struct xfs_btree_block, bb_u.l.bb_owner), l 1537 fs/xfs/libxfs/xfs_btree.c offsetof(struct xfs_btree_block, bb_u.l.bb_crc), l 1538 fs/xfs/libxfs/xfs_btree.c offsetof(struct xfs_btree_block, bb_u.l.bb_pad), l 1806 fs/xfs/libxfs/xfs_btree.c be64_to_cpu((*blkp)->bb_u.l.bb_owner) != l 2964 fs/xfs/libxfs/xfs_btree.c cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn); l 4370 fs/xfs/libxfs/xfs_btree.c if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner)) l 4372 fs/xfs/libxfs/xfs_btree.c block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner); l 4430 fs/xfs/libxfs/xfs_btree.c if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)) l 4432 fs/xfs/libxfs/xfs_btree.c if (block->bb_u.l.bb_blkno != cpu_to_be64(bp->b_bn)) l 4435 fs/xfs/libxfs/xfs_btree.c be64_to_cpu(block->bb_u.l.bb_owner) != owner) l 4454 fs/xfs/libxfs/xfs_btree.c if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) && l 4455 fs/xfs/libxfs/xfs_btree.c !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))) l 4457 fs/xfs/libxfs/xfs_btree.c if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) && l 4458 fs/xfs/libxfs/xfs_btree.c !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))) l 4879 fs/xfs/libxfs/xfs_btree.c return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l); l 4929 fs/xfs/libxfs/xfs_btree.c return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK); l 24 fs/xfs/libxfs/xfs_btree.h __be64 l; /* long form ptr */ l 1631 fs/xfs/libxfs/xfs_format.h struct xfs_btree_block_lhdr l; l 1655 fs/xfs/libxfs/xfs_format.h offsetof(struct xfs_btree_block, bb_u.l.bb_crc) l 957 fs/xfs/libxfs/xfs_rtbitmap.c #define xfs_rtcheck_alloc_range(m,t,b,l) (0) l 392 fs/xfs/scrub/bmap.c owner = be64_to_cpu(block->bb_u.l.bb_owner); l 234 fs/xfs/scrub/btree.c res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level); l 283 fs/xfs/xfs_bmap_util.c nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); l 292 fs/xfs/xfs_bmap_util.c nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib); l 311 fs/xfs/xfs_bmap_util.c nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); l 43 fs/xfs/xfs_error.h #define XFS_WANT_CORRUPTED_GOTO(mp, x, l) \ l 51 fs/xfs/xfs_error.h goto l; \ l 2836 fs/xfs/xfs_log.c #define xlog_state_callback_check_state(l) ((void)0) l 2220 fs/xfs/xfs_log_recover.c lsn = be64_to_cpu(btb->bb_u.l.bb_lsn); l 2221 fs/xfs/xfs_log_recover.c uuid = &btb->bb_u.l.bb_uuid; l 60 fs/xfs/xfs_message.h extern void assfail(char *expr, char *f, int l); l 61 fs/xfs/xfs_message.h extern void asswarn(char *expr, char *f, int l); l 625 fs/xfs/xfs_rtalloc.c int l; /* level number (loop control) */ l 640 fs/xfs/xfs_rtalloc.c for (l = xfs_highbit32(maxlen); l < mp->m_rsumlevels; l++) { l 648 fs/xfs/xfs_rtalloc.c error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, l 
698 fs/xfs/xfs_rtalloc.c for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) { l 707 fs/xfs/xfs_rtalloc.c error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, l 723 fs/xfs/xfs_rtalloc.c XFS_RTMAX(minlen, 1 << l), l 724 fs/xfs/xfs_rtalloc.c XFS_RTMIN(maxlen, (1 << (l + 1)) - 1), l 139 fs/xfs/xfs_rtalloc.h # define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS) l 140 fs/xfs/xfs_rtalloc.h # define xfs_rtfree_extent(t,b,l) (ENOSYS) l 141 fs/xfs/xfs_rtalloc.h # define xfs_rtpick_extent(m,t,l,rb) (ENOSYS) l 143 fs/xfs/xfs_rtalloc.h # define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS) l 147 fs/xfs/xfs_rtalloc.h # define xfs_rtalloc_extent_is_free(m,t,s,l,i) (ENOSYS) l 71 fs/xfs/xfs_trans_ail.c #define xfs_ail_check(a,l) l 209 include/acpi/acoutput.h #define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i) l 222 include/acpi/acoutput.h #define ACPI_DEBUG_OBJECT(obj,l,i) l 20 include/acpi/platform/acgcc.h #define va_start(v, l) __builtin_va_start(v, l) l 22 include/acpi/platform/acgcc.h #define va_arg(v, l) __builtin_va_arg(v, l) l 9 include/asm-generic/bitops/ext2-atomic-setbit.h #define ext2_set_bit_atomic(l, nr, addr) test_and_set_bit_le(nr, addr) l 10 include/asm-generic/bitops/ext2-atomic-setbit.h #define ext2_clear_bit_atomic(l, nr, addr) test_and_clear_bit_le(nr, addr) l 29 include/asm-generic/local.h #define local_read(l) atomic_long_read(&(l)->a) l 30 include/asm-generic/local.h #define local_set(l,i) atomic_long_set((&(l)->a),(i)) l 31 include/asm-generic/local.h #define local_inc(l) atomic_long_inc(&(l)->a) l 32 include/asm-generic/local.h #define local_dec(l) atomic_long_dec(&(l)->a) l 33 include/asm-generic/local.h #define local_add(i,l) atomic_long_add((i),(&(l)->a)) l 34 include/asm-generic/local.h #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) l 36 include/asm-generic/local.h #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a)) l 37 include/asm-generic/local.h #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a) l 38 include/asm-generic/local.h #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a) l 39 include/asm-generic/local.h #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a)) l 40 include/asm-generic/local.h #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) l 41 include/asm-generic/local.h #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) l 42 include/asm-generic/local.h #define local_inc_return(l) atomic_long_inc_return(&(l)->a) l 44 include/asm-generic/local.h #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) l 45 include/asm-generic/local.h #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) l 46 include/asm-generic/local.h #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u)) l 47 include/asm-generic/local.h #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) l 51 include/asm-generic/local.h #define __local_inc(l) local_set((l), local_read(l) + 1) l 52 include/asm-generic/local.h #define __local_dec(l) local_set((l), local_read(l) - 1) l 53 include/asm-generic/local.h #define __local_add(i,l) local_set((l), local_read(l) + (i)) l 54 include/asm-generic/local.h #define __local_sub(i,l) local_set((l), local_read(l) - (i)) l 30 include/asm-generic/local64.h #define local64_read(l) local_read(&(l)->a) l 31 include/asm-generic/local64.h #define local64_set(l,i) local_set((&(l)->a),(i)) l 32 include/asm-generic/local64.h #define local64_inc(l) local_inc(&(l)->a) l 33 
include/asm-generic/local64.h #define local64_dec(l) local_dec(&(l)->a) l 34 include/asm-generic/local64.h #define local64_add(i,l) local_add((i),(&(l)->a)) l 35 include/asm-generic/local64.h #define local64_sub(i,l) local_sub((i),(&(l)->a)) l 37 include/asm-generic/local64.h #define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a)) l 38 include/asm-generic/local64.h #define local64_dec_and_test(l) local_dec_and_test(&(l)->a) l 39 include/asm-generic/local64.h #define local64_inc_and_test(l) local_inc_and_test(&(l)->a) l 40 include/asm-generic/local64.h #define local64_add_negative(i, l) local_add_negative((i), (&(l)->a)) l 41 include/asm-generic/local64.h #define local64_add_return(i, l) local_add_return((i), (&(l)->a)) l 42 include/asm-generic/local64.h #define local64_sub_return(i, l) local_sub_return((i), (&(l)->a)) l 43 include/asm-generic/local64.h #define local64_inc_return(l) local_inc_return(&(l)->a) l 45 include/asm-generic/local64.h #define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n)) l 46 include/asm-generic/local64.h #define local64_xchg(l, n) local_xchg((&(l)->a), (n)) l 47 include/asm-generic/local64.h #define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u)) l 48 include/asm-generic/local64.h #define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a) l 52 include/asm-generic/local64.h #define __local64_inc(l) local64_set((l), local64_read(l) + 1) l 53 include/asm-generic/local64.h #define __local64_dec(l) local64_set((l), local64_read(l) - 1) l 54 include/asm-generic/local64.h #define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) l 55 include/asm-generic/local64.h #define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) l 68 include/asm-generic/local64.h #define local64_read(l) atomic64_read(&(l)->a) l 69 include/asm-generic/local64.h #define local64_set(l,i) atomic64_set((&(l)->a),(i)) l 70 include/asm-generic/local64.h #define local64_inc(l) atomic64_inc(&(l)->a) l 71 include/asm-generic/local64.h #define local64_dec(l) atomic64_dec(&(l)->a) l 72 include/asm-generic/local64.h #define local64_add(i,l) atomic64_add((i),(&(l)->a)) l 73 include/asm-generic/local64.h #define local64_sub(i,l) atomic64_sub((i),(&(l)->a)) l 75 include/asm-generic/local64.h #define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a)) l 76 include/asm-generic/local64.h #define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a) l 77 include/asm-generic/local64.h #define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a) l 78 include/asm-generic/local64.h #define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a)) l 79 include/asm-generic/local64.h #define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a)) l 80 include/asm-generic/local64.h #define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a)) l 81 include/asm-generic/local64.h #define local64_inc_return(l) atomic64_inc_return(&(l)->a) l 83 include/asm-generic/local64.h #define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n)) l 84 include/asm-generic/local64.h #define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n)) l 85 include/asm-generic/local64.h #define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u)) l 86 include/asm-generic/local64.h #define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a) l 90 include/asm-generic/local64.h #define __local64_inc(l) local64_set((l), local64_read(l) + 1) l 91 include/asm-generic/local64.h #define __local64_dec(l) local64_set((l), local64_read(l) 
- 1) l 92 include/asm-generic/local64.h #define __local64_add(i,l) local64_set((l), local64_read(l) + (i)) l 93 include/asm-generic/local64.h #define __local64_sub(i,l) local64_set((l), local64_read(l) - (i)) l 123 include/asm-generic/qrwlock.h #define arch_read_lock(l) queued_read_lock(l) l 124 include/asm-generic/qrwlock.h #define arch_write_lock(l) queued_write_lock(l) l 125 include/asm-generic/qrwlock.h #define arch_read_trylock(l) queued_read_trylock(l) l 126 include/asm-generic/qrwlock.h #define arch_write_trylock(l) queued_write_trylock(l) l 127 include/asm-generic/qrwlock.h #define arch_read_unlock(l) queued_read_unlock(l) l 128 include/asm-generic/qrwlock.h #define arch_write_unlock(l) queued_write_unlock(l) l 109 include/asm-generic/qspinlock.h #define arch_spin_is_locked(l) queued_spin_is_locked(l) l 110 include/asm-generic/qspinlock.h #define arch_spin_is_contended(l) queued_spin_is_contended(l) l 111 include/asm-generic/qspinlock.h #define arch_spin_value_unlocked(l) queued_spin_value_unlocked(l) l 112 include/asm-generic/qspinlock.h #define arch_spin_lock(l) queued_spin_lock(l) l 113 include/asm-generic/qspinlock.h #define arch_spin_trylock(l) queued_spin_trylock(l) l 114 include/asm-generic/qspinlock.h #define arch_spin_unlock(l) queued_spin_unlock(l) l 346 include/clocksource/timer-ti-dm.h u32 l; l 348 include/clocksource/timer-ti-dm.h l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); l 349 include/clocksource/timer-ti-dm.h if (l & OMAP_TIMER_CTRL_ST) { l 350 include/clocksource/timer-ti-dm.h l &= ~0x1; l 351 include/clocksource/timer-ti-dm.h __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); l 44 include/kvm/iodev.h int l, void *v) l 46 include/kvm/iodev.h return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v) l 52 include/kvm/iodev.h int l, const void *v) l 54 include/kvm/iodev.h return dev->ops->write ? 
dev->ops->write(vcpu, dev, addr, l, v) l 247 include/linux/amba/clcd.h #define CHECK(e,l,h) (var->e < l || var->e > h) l 18 include/linux/badblocks.h #define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63)) l 167 include/linux/bitops.h static inline unsigned fls_long(unsigned long l) l 169 include/linux/bitops.h if (sizeof(l) == 4) l 170 include/linux/bitops.h return fls(l); l 171 include/linux/bitops.h return fls64(l); l 190 include/linux/bitops.h static inline int get_count_order_long(unsigned long l) l 192 include/linux/bitops.h if (l == 0UL) l 194 include/linux/bitops.h else if (l & (l - 1UL)) l 195 include/linux/bitops.h return (int)fls_long(l); l 197 include/linux/bitops.h return (int)fls_long(l) - 1; l 21 include/linux/bits.h #define GENMASK(h, l) \ l 22 include/linux/bits.h (((~UL(0)) - (UL(1) << (l)) + 1) & \ l 25 include/linux/bits.h #define GENMASK_ULL(h, l) \ l 26 include/linux/bits.h (((~ULL(0)) - (ULL(1) << (l)) + 1) & \ l 210 include/linux/btree.h #define BTREE_TYPE_SUFFIX l l 30 include/linux/ceph/pagelist.h extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); l 10 include/linux/ceph/striper.h void ceph_calc_file_object_mapping(struct ceph_file_layout *l, l 36 include/linux/ceph/striper.h int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len, l 42 include/linux/ceph/striper.h int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len, l 64 include/linux/ceph/striper.h int ceph_extent_to_file(struct ceph_file_layout *l, l 69 include/linux/ceph/striper.h u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size); l 57 include/linux/dcache.h #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } l 40 include/linux/fscrypt.h #define FSTR_INIT(n, l) { .name = n, .len = l } l 556 include/linux/hugetlb.h static inline void hugetlb_count_add(long l, struct mm_struct *mm) l 558 include/linux/hugetlb.h atomic_long_add(l, &mm->hugetlb_usage); l 561 include/linux/hugetlb.h static inline void hugetlb_count_sub(long l, struct mm_struct *mm) l 563 include/linux/hugetlb.h atomic_long_sub(l, &mm->hugetlb_usage); l 732 include/linux/hugetlb.h static inline void hugetlb_count_sub(long l, struct mm_struct *mm) l 24 include/linux/iio/sysfs.h struct list_head l; l 137 include/linux/isdn/capiutil.h unsigned l, p; l 444 include/linux/lightnvm.h struct ppa_addr l; l 449 include/linux/lightnvm.h l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; l 450 include/linux/lightnvm.h l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; l 451 include/linux/lightnvm.h l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; l 452 include/linux/lightnvm.h l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; l 453 include/linux/lightnvm.h l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; l 454 include/linux/lightnvm.h l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; l 458 include/linux/lightnvm.h l.ppa = ((u64)r.m.grp) << lbaf->ch_offset; l 459 include/linux/lightnvm.h l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset; l 460 include/linux/lightnvm.h l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset; l 461 include/linux/lightnvm.h l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset; l 464 include/linux/lightnvm.h return l; l 471 include/linux/lightnvm.h struct ppa_addr l; l 473 include/linux/lightnvm.h l.ppa = 0; l 478 include/linux/lightnvm.h l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; l 479 include/linux/lightnvm.h l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; l 480 include/linux/lightnvm.h l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; l 481 include/linux/lightnvm.h l.g.pg 
= (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; l 482 include/linux/lightnvm.h l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; l 483 include/linux/lightnvm.h l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; l 487 include/linux/lightnvm.h l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset; l 488 include/linux/lightnvm.h l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset; l 489 include/linux/lightnvm.h l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset; l 490 include/linux/lightnvm.h l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset; l 493 include/linux/lightnvm.h return l; l 390 include/linux/lockdep.h #define lockdep_assert_held(l) do { \ l 391 include/linux/lockdep.h WARN_ON(debug_locks && !lockdep_is_held(l)); \ l 394 include/linux/lockdep.h #define lockdep_assert_held_write(l) do { \ l 395 include/linux/lockdep.h WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ l 398 include/linux/lockdep.h #define lockdep_assert_held_read(l) do { \ l 399 include/linux/lockdep.h WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ l 402 include/linux/lockdep.h #define lockdep_assert_held_once(l) do { \ l 403 include/linux/lockdep.h WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ l 408 include/linux/lockdep.h #define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) l 409 include/linux/lockdep.h #define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) l 410 include/linux/lockdep.h #define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) l 430 include/linux/lockdep.h # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) l 431 include/linux/lockdep.h # define lock_release(l, n, i) do { } while (0) l 432 include/linux/lockdep.h # define lock_downgrade(l, i) do { } while (0) l 433 include/linux/lockdep.h # define lock_set_class(l, n, k, s, i) do { } while (0) l 434 include/linux/lockdep.h # define lock_set_subclass(l, s, i) do { } while (0) l 476 include/linux/lockdep.h #define lockdep_is_held_type(l, r) (1) l 478 include/linux/lockdep.h #define lockdep_assert_held(l) do { (void)(l); } while (0) l 479 include/linux/lockdep.h #define lockdep_assert_held_write(l) do { (void)(l); } while (0) l 480 include/linux/lockdep.h #define lockdep_assert_held_read(l) do { (void)(l); } while (0) l 481 include/linux/lockdep.h #define lockdep_assert_held_once(l) do { (void)(l); } while (0) l 489 include/linux/lockdep.h #define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) l 490 include/linux/lockdep.h #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) l 491 include/linux/lockdep.h #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) l 588 include/linux/lockdep.h #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) l 589 include/linux/lockdep.h #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) l 590 include/linux/lockdep.h #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) l 592 include/linux/lockdep.h #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) l 593 include/linux/lockdep.h #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) l 594 include/linux/lockdep.h #define spin_release(l, n, i) lock_release(l, n, i) l 596 include/linux/lockdep.h #define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) l 597 include/linux/lockdep.h #define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) l 598 include/linux/lockdep.h 
#define rwlock_release(l, n, i) lock_release(l, n, i) l 600 include/linux/lockdep.h #define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) l 601 include/linux/lockdep.h #define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) l 602 include/linux/lockdep.h #define seqcount_release(l, n, i) lock_release(l, n, i) l 604 include/linux/lockdep.h #define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) l 605 include/linux/lockdep.h #define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) l 606 include/linux/lockdep.h #define mutex_release(l, n, i) lock_release(l, n, i) l 608 include/linux/lockdep.h #define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) l 609 include/linux/lockdep.h #define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) l 610 include/linux/lockdep.h #define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) l 611 include/linux/lockdep.h #define rwsem_release(l, n, i) lock_release(l, n, i) l 613 include/linux/lockdep.h #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) l 614 include/linux/lockdep.h #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) l 615 include/linux/lockdep.h #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) l 616 include/linux/lockdep.h #define lock_map_release(l) lock_release(l, 1, _THIS_IP_) l 48 include/linux/lockref.h static inline bool __lockref_is_dead(const struct lockref *l) l 50 include/linux/lockref.h return ((int)l->count < 0); l 220 include/linux/math64.h } l; l 227 include/linux/math64.h rl.ll = mul_u32_u32(a0.l.low, b0.l.low); l 228 include/linux/math64.h rm.ll = mul_u32_u32(a0.l.low, b0.l.high); l 229 include/linux/math64.h rn.ll = mul_u32_u32(a0.l.high, b0.l.low); l 230 include/linux/math64.h rh.ll = mul_u32_u32(a0.l.high, b0.l.high); l 237 include/linux/math64.h rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; l 238 include/linux/math64.h rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; l 239 include/linux/math64.h rh.l.high = (c >> 32) + rh.l.high; l 266 include/linux/math64.h } l; l 270 include/linux/math64.h rl.ll = mul_u32_u32(u.l.low, mul); l 271 include/linux/math64.h rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high; l 274 include/linux/math64.h rl.l.high = do_div(rh.ll, divisor); l 279 include/linux/math64.h rl.l.high = rh.l.low; l 15 include/linux/mfd/tmio.h #define tmio_ioread16_rep(r, b, l) readsw(r, b, l) l 21 include/linux/mfd/tmio.h #define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) l 64 include/linux/migrate.h extern void putback_movable_pages(struct list_head *l); l 68 include/linux/migrate.h extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, l 83 include/linux/migrate.h static inline void putback_movable_pages(struct list_head *l) {} l 84 include/linux/migrate.h static inline int migrate_pages(struct list_head *l, new_page_t new, l 9678 include/linux/mlx5/mlx5_ifc.h u8 l[0x1]; l 644 include/linux/mtd/rawnand.h #define NAND_OP_DATA_IN(l, b, ns) \ l 648 include/linux/mtd/rawnand.h .len = l, \ l 655 include/linux/mtd/rawnand.h #define NAND_OP_DATA_OUT(l, b, ns) \ l 659 include/linux/mtd/rawnand.h .len = l, \ l 666 include/linux/mtd/rawnand.h #define NAND_OP_8BIT_DATA_IN(l, b, ns) \ l 670 include/linux/mtd/rawnand.h .len = l, \ l 677 include/linux/mtd/rawnand.h #define NAND_OP_8BIT_DATA_OUT(l, b, ns) \ l 681 
include/linux/mtd/rawnand.h .len = l, \ l 224 include/linux/netdevice.h #define netdev_hw_addr_list_count(l) ((l)->count) l 225 include/linux/netdevice.h #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) l 226 include/linux/netdevice.h #define netdev_hw_addr_list_for_each(ha, l) \ l 227 include/linux/netdevice.h list_for_each_entry(ha, &(l)->list, list) l 986 include/linux/of.h #define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) l 77 include/linux/seqlock.h seqcount_t *l = (seqcount_t *)s; l 81 include/linux/seqlock.h seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); l 82 include/linux/seqlock.h seqcount_release(&l->dep_map, 1, _RET_IP_); l 27 include/linux/sunrpc/xdr.h #define XDR_QUADLEN(l) (((l) + 3) >> 2) l 111 include/linux/time.h #define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l)) l 373 include/linux/uaccess.h #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e) l 105 include/linux/userfaultfd_k.h struct list_head *l) l 110 include/linux/userfaultfd_k.h static inline void dup_userfaultfd_complete(struct list_head *l) l 176 include/linux/wimax/debug.h #define _d_printf(l, tag, dev, f, a...) \ l 179 include/linux/wimax/debug.h if (!d_test(l)) \ l 343 include/linux/wimax/debug.h #define d_test(l) \ l 345 include/linux/wimax/debug.h unsigned __l = l; /* type enforcer */ \ l 361 include/linux/wimax/debug.h #define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a) l 371 include/linux/wimax/debug.h #define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a) l 381 include/linux/wimax/debug.h #define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a) l 391 include/linux/wimax/debug.h #define d_dump(l, dev, ptr, size) \ l 394 include/linux/wimax/debug.h if (!d_test(l)) \ l 35 include/media/i2c/ov772x.h #define OV772X_AUTO_EDGECTRL(u, l) \ l 38 include/media/i2c/ov772x.h .lower = (l & OV772X_EDGE_LOWER_MASK), \ l 268 include/net/bluetooth/bluetooth.h void bt_sock_link(struct bt_sock_list *l, struct sock *s); l 269 include/net/bluetooth/bluetooth.h void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); l 163 include/net/iucv/af_iucv.h void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); l 164 include/net/iucv/af_iucv.h void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); l 145 include/rdma/ib_hdrs.h } l; l 251 include/scsi/scsi_device.h #define sdev_printk(l, sdev, fmt, a...) 
\ l 252 include/scsi/scsi_device.h sdev_prefix_printk(l, sdev, NULL, fmt, ##a) l 980 include/sound/pcm.h const struct snd_pcm_hw_constraint_list *l); l 333 include/trace/events/btrfs.h TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, l 336 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, start), l 359 include/trace/events/btrfs.h __entry->num_bytes = btrfs_file_extent_num_bytes(l, fi); l 360 include/trace/events/btrfs.h __entry->ram_bytes = btrfs_file_extent_ram_bytes(l, fi); l 361 include/trace/events/btrfs.h __entry->disk_bytenr = btrfs_file_extent_disk_bytenr(l, fi); l 362 include/trace/events/btrfs.h __entry->disk_num_bytes = btrfs_file_extent_disk_num_bytes(l, fi); l 363 include/trace/events/btrfs.h __entry->extent_offset = btrfs_file_extent_offset(l, fi); l 364 include/trace/events/btrfs.h __entry->extent_type = btrfs_file_extent_type(l, fi); l 365 include/trace/events/btrfs.h __entry->compression = btrfs_file_extent_compression(l, fi); l 388 include/trace/events/btrfs.h TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, l 391 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, slot, start), l 410 include/trace/events/btrfs.h __entry->extent_type = btrfs_file_extent_type(l, fi); l 411 include/trace/events/btrfs.h __entry->compression = btrfs_file_extent_compression(l, fi); l 413 include/trace/events/btrfs.h __entry->extent_end = (start + btrfs_file_extent_ram_bytes(l, fi)); l 429 include/trace/events/btrfs.h TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, l 432 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, start) l 438 include/trace/events/btrfs.h TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, l 441 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, start) l 447 include/trace/events/btrfs.h TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, l 450 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, slot, start) l 456 include/trace/events/btrfs.h TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l, l 459 include/trace/events/btrfs.h TP_ARGS(bi, l, fi, slot, start) l 58 include/uapi/linux/atm.h #define __SO_ENCODE(l,n,t) ((((l) & 0x1FF) << 22) | ((n) << 16) | \ l 98 include/uapi/linux/map_to_7segment.h #define _SEG7(l,a,b,c,d,e,f,g) \ l 17 include/uapi/linux/romfs_fs.h #define __mkw(h,l) (((h)&0x00ff)<< 8|((l)&0x00ff)) l 18 include/uapi/linux/romfs_fs.h #define __mkl(h,l) (((h)&0xffff)<<16|((l)&0xffff)) l 72 include/uapi/linux/swab.h __u32 l = val & ((1ULL << 32) - 1); l 73 include/uapi/linux/swab.h return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h))); l 591 include/video/pm3fb.h #define PM3FBDestReadMode_Layout0(l) (((l) & 0x3) << 12) l 592 include/video/pm3fb.h #define PM3FBDestReadMode_Layout1(l) (((l) & 0x3) << 14) l 593 include/video/pm3fb.h #define PM3FBDestReadMode_Layout2(l) (((l) & 0x3) << 16) l 594 include/video/pm3fb.h #define PM3FBDestReadMode_Layout3(l) (((l) & 0x3) << 18) l 620 include/video/pm3fb.h #define PM3FBSourceReadMode_Layout(l) (((l) & 0x3) << 8) l 660 include/video/pm3fb.h #define PM3FBWriteMode_Layout0(l) (((l) & 0x3) << 16) l 661 include/video/pm3fb.h #define PM3FBWriteMode_Layout1(l) (((l) & 0x3) << 18) l 662 include/video/pm3fb.h #define PM3FBWriteMode_Layout2(l) (((l) & 0x3) << 20) l 663 include/video/pm3fb.h #define PM3FBWriteMode_Layout3(l) (((l) & 0x3) << 22) l 1090 ipc/sem.c struct list_head *l; l 1097 ipc/sem.c l = &sma->sems[semnum].pending_const; l 1099 ipc/sem.c l = &sma->sems[semnum].pending_alter; l 1101 ipc/sem.c list_for_each_entry(q, l, list) { l 297 kernel/audit.h #define audit_to_watch(k, p, l, 
o) (-EINVAL) l 298 kernel/audit.h #define audit_add_watch(k, l) (-EINVAL) l 303 kernel/audit.h #define audit_alloc_mark(k, p, l) (ERR_PTR(-EINVAL)) l 47 kernel/bpf/bpf_lru_list.c static void bpf_lru_list_count_inc(struct bpf_lru_list *l, l 51 kernel/bpf/bpf_lru_list.c l->counts[type]++; l 54 kernel/bpf/bpf_lru_list.c static void bpf_lru_list_count_dec(struct bpf_lru_list *l, l 58 kernel/bpf/bpf_lru_list.c l->counts[type]--; l 61 kernel/bpf/bpf_lru_list.c static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l, l 72 kernel/bpf/bpf_lru_list.c if (&node->list == l->next_inactive_rotation) l 73 kernel/bpf/bpf_lru_list.c l->next_inactive_rotation = l->next_inactive_rotation->prev; l 75 kernel/bpf/bpf_lru_list.c bpf_lru_list_count_dec(l, node->type); l 82 kernel/bpf/bpf_lru_list.c static void __bpf_lru_node_move_in(struct bpf_lru_list *l, l 90 kernel/bpf/bpf_lru_list.c bpf_lru_list_count_inc(l, tgt_type); l 93 kernel/bpf/bpf_lru_list.c list_move(&node->list, &l->lists[tgt_type]); l 100 kernel/bpf/bpf_lru_list.c static void __bpf_lru_node_move(struct bpf_lru_list *l, l 109 kernel/bpf/bpf_lru_list.c bpf_lru_list_count_dec(l, node->type); l 110 kernel/bpf/bpf_lru_list.c bpf_lru_list_count_inc(l, tgt_type); l 118 kernel/bpf/bpf_lru_list.c if (&node->list == l->next_inactive_rotation) l 119 kernel/bpf/bpf_lru_list.c l->next_inactive_rotation = l->next_inactive_rotation->prev; l 121 kernel/bpf/bpf_lru_list.c list_move(&node->list, &l->lists[tgt_type]); l 124 kernel/bpf/bpf_lru_list.c static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l) l 126 kernel/bpf/bpf_lru_list.c return l->counts[BPF_LRU_LIST_T_INACTIVE] < l 127 kernel/bpf/bpf_lru_list.c l->counts[BPF_LRU_LIST_T_ACTIVE]; l 140 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l) l 142 kernel/bpf/bpf_lru_list.c struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE]; l 149 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); l 151 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); l 167 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l) l 169 kernel/bpf/bpf_lru_list.c struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; l 177 kernel/bpf/bpf_lru_list.c last = l->next_inactive_rotation->next; l 181 kernel/bpf/bpf_lru_list.c cur = l->next_inactive_rotation; l 191 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); l 198 kernel/bpf/bpf_lru_list.c l->next_inactive_rotation = next; l 207 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l, l 212 kernel/bpf/bpf_lru_list.c struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; l 219 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); l 221 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move_to_free(l, node, free_list, l 237 kernel/bpf/bpf_lru_list.c static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l) l 239 kernel/bpf/bpf_lru_list.c if (bpf_lru_list_inactive_low(l)) l 240 kernel/bpf/bpf_lru_list.c __bpf_lru_list_rotate_active(lru, l); l 242 kernel/bpf/bpf_lru_list.c __bpf_lru_list_rotate_inactive(lru, l); l 256 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l, l 266 kernel/bpf/bpf_lru_list.c nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink, l 272 kernel/bpf/bpf_lru_list.c if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE])) l 273 kernel/bpf/bpf_lru_list.c force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE]; l 275 kernel/bpf/bpf_lru_list.c force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE]; l 280 
kernel/bpf/bpf_lru_list.c __bpf_lru_node_move_to_free(l, node, free_list, l 290 kernel/bpf/bpf_lru_list.c static void __local_list_flush(struct bpf_lru_list *l, l 298 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE); l 300 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move_in(l, node, l 305 kernel/bpf/bpf_lru_list.c static void bpf_lru_list_push_free(struct bpf_lru_list *l, l 313 kernel/bpf/bpf_lru_list.c raw_spin_lock_irqsave(&l->lock, flags); l 314 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE); l 315 kernel/bpf/bpf_lru_list.c raw_spin_unlock_irqrestore(&l->lock, flags); l 321 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l = &lru->common_lru.lru_list; l 325 kernel/bpf/bpf_lru_list.c raw_spin_lock(&l->lock); l 327 kernel/bpf/bpf_lru_list.c __local_list_flush(l, loc_l); l 329 kernel/bpf/bpf_lru_list.c __bpf_lru_list_rotate(lru, l); l 331 kernel/bpf/bpf_lru_list.c list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE], l 333 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l), l 340 kernel/bpf/bpf_lru_list.c __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, l 344 kernel/bpf/bpf_lru_list.c raw_spin_unlock(&l->lock); l 404 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l; l 408 kernel/bpf/bpf_lru_list.c l = per_cpu_ptr(lru->percpu_lru, cpu); l 410 kernel/bpf/bpf_lru_list.c raw_spin_lock_irqsave(&l->lock, flags); l 412 kernel/bpf/bpf_lru_list.c __bpf_lru_list_rotate(lru, l); l 414 kernel/bpf/bpf_lru_list.c free_list = &l->lists[BPF_LRU_LIST_T_FREE]; l 416 kernel/bpf/bpf_lru_list.c __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list, l 423 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); l 426 kernel/bpf/bpf_lru_list.c raw_spin_unlock_irqrestore(&l->lock, flags); l 538 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l; l 541 kernel/bpf/bpf_lru_list.c l = per_cpu_ptr(lru->percpu_lru, node->cpu); l 543 kernel/bpf/bpf_lru_list.c raw_spin_lock_irqsave(&l->lock, flags); l 545 kernel/bpf/bpf_lru_list.c __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE); l 547 kernel/bpf/bpf_lru_list.c raw_spin_unlock_irqrestore(&l->lock, flags); l 562 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l = &lru->common_lru.lru_list; l 571 kernel/bpf/bpf_lru_list.c list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); l 582 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l; l 591 kernel/bpf/bpf_lru_list.c l = per_cpu_ptr(lru->percpu_lru, cpu); l 597 kernel/bpf/bpf_lru_list.c list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); l 630 kernel/bpf/bpf_lru_list.c static void bpf_lru_list_init(struct bpf_lru_list *l) l 635 kernel/bpf/bpf_lru_list.c INIT_LIST_HEAD(&l->lists[i]); l 638 kernel/bpf/bpf_lru_list.c l->counts[i] = 0; l 640 kernel/bpf/bpf_lru_list.c l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE]; l 642 kernel/bpf/bpf_lru_list.c raw_spin_lock_init(&l->lock); l 656 kernel/bpf/bpf_lru_list.c struct bpf_lru_list *l; l 658 kernel/bpf/bpf_lru_list.c l = per_cpu_ptr(lru->percpu_lru, cpu); l 659 kernel/bpf/bpf_lru_list.c bpf_lru_list_init(l); l 79 kernel/bpf/hashtab.c static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, l 82 kernel/bpf/hashtab.c *(void __percpu **)(l->key + key_size) = pptr; l 85 kernel/bpf/hashtab.c static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size) l 87 kernel/bpf/hashtab.c return *(void __percpu **)(l->key + key_size); l 90 kernel/bpf/hashtab.c static void *fd_htab_map_get_ptr(const struct 
bpf_map *map, struct htab_elem *l) l 92 kernel/bpf/hashtab.c return *(void **)(l->key + roundup(map->key_size, 8)); l 123 kernel/bpf/hashtab.c struct htab_elem *l; l 126 kernel/bpf/hashtab.c l = container_of(node, struct htab_elem, lru_node); l 127 kernel/bpf/hashtab.c memcpy(l->key, key, htab->map.key_size); l 128 kernel/bpf/hashtab.c return l; l 205 kernel/bpf/hashtab.c struct pcpu_freelist_node *l; l 214 kernel/bpf/hashtab.c l = pcpu_freelist_pop(&htab->freelist); l 218 kernel/bpf/hashtab.c l_new = container_of(l, struct htab_elem, fnode); l 425 kernel/bpf/hashtab.c struct htab_elem *l; l 427 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) l 428 kernel/bpf/hashtab.c if (l->hash == hash && !memcmp(&l->key, key, key_size)) l 429 kernel/bpf/hashtab.c return l; l 443 kernel/bpf/hashtab.c struct htab_elem *l; l 446 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) l 447 kernel/bpf/hashtab.c if (l->hash == hash && !memcmp(&l->key, key, key_size)) l 448 kernel/bpf/hashtab.c return l; l 465 kernel/bpf/hashtab.c struct htab_elem *l; l 477 kernel/bpf/hashtab.c l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); l 479 kernel/bpf/hashtab.c return l; l 484 kernel/bpf/hashtab.c struct htab_elem *l = __htab_map_lookup_elem(map, key); l 486 kernel/bpf/hashtab.c if (l) l 487 kernel/bpf/hashtab.c return l->key + round_up(map->key_size, 8); l 521 kernel/bpf/hashtab.c struct htab_elem *l = __htab_map_lookup_elem(map, key); l 523 kernel/bpf/hashtab.c if (l) { l 525 kernel/bpf/hashtab.c bpf_lru_node_set_ref(&l->lru_node); l 526 kernel/bpf/hashtab.c return l->key + round_up(map->key_size, 8); l 573 kernel/bpf/hashtab.c struct htab_elem *l = NULL, *tgt_l; l 585 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) l 586 kernel/bpf/hashtab.c if (l == tgt_l) { l 587 kernel/bpf/hashtab.c hlist_nulls_del_rcu(&l->hash_node); l 593 kernel/bpf/hashtab.c return l == tgt_l; l 601 kernel/bpf/hashtab.c struct htab_elem *l, *next_l; l 617 kernel/bpf/hashtab.c l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); l 619 kernel/bpf/hashtab.c if (!l) l 623 kernel/bpf/hashtab.c next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), l 655 kernel/bpf/hashtab.c static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) l 658 kernel/bpf/hashtab.c free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); l 659 kernel/bpf/hashtab.c kfree(l); l 664 kernel/bpf/hashtab.c struct htab_elem *l = container_of(head, struct htab_elem, rcu); l 665 kernel/bpf/hashtab.c struct bpf_htab *htab = l->htab; l 673 kernel/bpf/hashtab.c htab_elem_free(htab, l); l 678 kernel/bpf/hashtab.c static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) l 683 kernel/bpf/hashtab.c void *ptr = fd_htab_map_get_ptr(map, l); l 689 kernel/bpf/hashtab.c __pcpu_freelist_push(&htab->freelist, &l->fnode); l 692 kernel/bpf/hashtab.c l->htab = htab; l 693 kernel/bpf/hashtab.c call_rcu(&l->rcu, htab_elem_free_rcu); l 740 kernel/bpf/hashtab.c struct pcpu_freelist_node *l; l 742 kernel/bpf/hashtab.c l = __pcpu_freelist_pop(&htab->freelist); l 743 kernel/bpf/hashtab.c if (!l) l 745 kernel/bpf/hashtab.c l_new = container_of(l, struct htab_elem, fnode); l 1112 kernel/bpf/hashtab.c struct htab_elem *l; l 1127 kernel/bpf/hashtab.c l = lookup_elem_raw(head, hash, key, key_size); l 1129 kernel/bpf/hashtab.c if (l) { l 1130 kernel/bpf/hashtab.c hlist_nulls_del_rcu(&l->hash_node); l 1131 kernel/bpf/hashtab.c 
free_htab_elem(htab, l); l 1144 kernel/bpf/hashtab.c struct htab_elem *l; l 1159 kernel/bpf/hashtab.c l = lookup_elem_raw(head, hash, key, key_size); l 1161 kernel/bpf/hashtab.c if (l) { l 1162 kernel/bpf/hashtab.c hlist_nulls_del_rcu(&l->hash_node); l 1167 kernel/bpf/hashtab.c if (l) l 1168 kernel/bpf/hashtab.c bpf_lru_push_free(&htab->lru, &l->lru_node); l 1179 kernel/bpf/hashtab.c struct htab_elem *l; l 1181 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { l 1182 kernel/bpf/hashtab.c hlist_nulls_del_rcu(&l->hash_node); l 1183 kernel/bpf/hashtab.c htab_elem_free(htab, l); l 1263 kernel/bpf/hashtab.c struct htab_elem *l = __htab_map_lookup_elem(map, key); l 1265 kernel/bpf/hashtab.c if (l) l 1266 kernel/bpf/hashtab.c return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); l 1273 kernel/bpf/hashtab.c struct htab_elem *l = __htab_map_lookup_elem(map, key); l 1275 kernel/bpf/hashtab.c if (l) { l 1276 kernel/bpf/hashtab.c bpf_lru_node_set_ref(&l->lru_node); l 1277 kernel/bpf/hashtab.c return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); l 1285 kernel/bpf/hashtab.c struct htab_elem *l; l 1297 kernel/bpf/hashtab.c l = __htab_map_lookup_elem(map, key); l 1298 kernel/bpf/hashtab.c if (!l) l 1303 kernel/bpf/hashtab.c pptr = htab_elem_get_ptr(l, map->key_size); l 1336 kernel/bpf/hashtab.c struct htab_elem *l; l 1342 kernel/bpf/hashtab.c l = __htab_map_lookup_elem(map, key); l 1343 kernel/bpf/hashtab.c if (!l) { l 1350 kernel/bpf/hashtab.c pptr = htab_elem_get_ptr(l, map->key_size); l 1396 kernel/bpf/hashtab.c struct htab_elem *l; l 1402 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { l 1403 kernel/bpf/hashtab.c void *ptr = fd_htab_map_get_ptr(map, l); l 223 kernel/bpf/helpers.c arch_spinlock_t *l = (void *)lock; l 230 kernel/bpf/helpers.c BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); l 232 kernel/bpf/helpers.c arch_spin_lock(l); l 237 kernel/bpf/helpers.c arch_spinlock_t *l = (void *)lock; l 239 kernel/bpf/helpers.c arch_spin_unlock(l); l 246 kernel/bpf/helpers.c atomic_t *l = (void *)lock; l 248 kernel/bpf/helpers.c BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); l 250 kernel/bpf/helpers.c atomic_cond_read_relaxed(l, !VAL); l 251 kernel/bpf/helpers.c } while (atomic_xchg(l, 1)); l 256 kernel/bpf/helpers.c atomic_t *l = (void *)lock; l 258 kernel/bpf/helpers.c atomic_set_release(l, 0); l 42 kernel/bpf/offload.c struct rhash_head l; l 54 kernel/bpf/offload.c .head_offset = offsetof(struct bpf_offload_netdev, l), l 611 kernel/bpf/offload.c err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); l 642 kernel/bpf/offload.c WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); l 202 kernel/cgroup/cgroup-v1.c struct cgroup_pidlist *l, *tmp_l; l 205 kernel/cgroup/cgroup-v1.c list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links) l 206 kernel/cgroup/cgroup-v1.c mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0); l 216 kernel/cgroup/cgroup-v1.c struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist, l 220 kernel/cgroup/cgroup-v1.c mutex_lock(&l->owner->pidlist_mutex); l 227 kernel/cgroup/cgroup-v1.c list_del(&l->links); l 228 kernel/cgroup/cgroup-v1.c kvfree(l->list); l 229 kernel/cgroup/cgroup-v1.c put_pid_ns(l->key.ns); l 230 kernel/cgroup/cgroup-v1.c tofree = l; l 233 kernel/cgroup/cgroup-v1.c mutex_unlock(&l->owner->pidlist_mutex); l 284 kernel/cgroup/cgroup-v1.c struct cgroup_pidlist *l; l 290 kernel/cgroup/cgroup-v1.c list_for_each_entry(l, &cgrp->pidlists, links) l 291 
kernel/cgroup/cgroup-v1.c if (l->key.type == type && l->key.ns == ns) l 292 kernel/cgroup/cgroup-v1.c return l; l 305 kernel/cgroup/cgroup-v1.c struct cgroup_pidlist *l; l 309 kernel/cgroup/cgroup-v1.c l = cgroup_pidlist_find(cgrp, type); l 310 kernel/cgroup/cgroup-v1.c if (l) l 311 kernel/cgroup/cgroup-v1.c return l; l 314 kernel/cgroup/cgroup-v1.c l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL); l 315 kernel/cgroup/cgroup-v1.c if (!l) l 316 kernel/cgroup/cgroup-v1.c return l; l 318 kernel/cgroup/cgroup-v1.c INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn); l 319 kernel/cgroup/cgroup-v1.c l->key.type = type; l 321 kernel/cgroup/cgroup-v1.c l->key.ns = get_pid_ns(task_active_pid_ns(current)); l 322 kernel/cgroup/cgroup-v1.c l->owner = cgrp; l 323 kernel/cgroup/cgroup-v1.c list_add(&l->links, &cgrp->pidlists); l 324 kernel/cgroup/cgroup-v1.c return l; l 338 kernel/cgroup/cgroup-v1.c struct cgroup_pidlist *l; l 372 kernel/cgroup/cgroup-v1.c l = cgroup_pidlist_find_create(cgrp, type); l 373 kernel/cgroup/cgroup-v1.c if (!l) { l 379 kernel/cgroup/cgroup-v1.c kvfree(l->list); l 380 kernel/cgroup/cgroup-v1.c l->list = array; l 381 kernel/cgroup/cgroup-v1.c l->length = length; l 382 kernel/cgroup/cgroup-v1.c *lp = l; l 402 kernel/cgroup/cgroup-v1.c struct cgroup_pidlist *l; l 428 kernel/cgroup/cgroup-v1.c l = of->priv; l 431 kernel/cgroup/cgroup-v1.c int end = l->length; l 435 kernel/cgroup/cgroup-v1.c if (l->list[mid] == pid) { l 438 kernel/cgroup/cgroup-v1.c } else if (l->list[mid] <= pid) l 445 kernel/cgroup/cgroup-v1.c if (index >= l->length) l 448 kernel/cgroup/cgroup-v1.c iter = l->list + index; l 456 kernel/cgroup/cgroup-v1.c struct cgroup_pidlist *l = of->priv; l 458 kernel/cgroup/cgroup-v1.c if (l) l 459 kernel/cgroup/cgroup-v1.c mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, l 467 kernel/cgroup/cgroup-v1.c struct cgroup_pidlist *l = of->priv; l 469 kernel/cgroup/cgroup-v1.c pid_t *end = l->list + l->length; l 4393 kernel/cgroup/cgroup.c struct list_head *l; l 4401 kernel/cgroup/cgroup.c l = it->tcset_pos->next; l 4403 kernel/cgroup/cgroup.c if (l != it->tcset_head) { l 4404 kernel/cgroup/cgroup.c it->tcset_pos = l; l 4405 kernel/cgroup/cgroup.c return container_of(l, struct css_set, l 4413 kernel/cgroup/cgroup.c l = it->cset_pos; l 4414 kernel/cgroup/cgroup.c l = l->next; l 4415 kernel/cgroup/cgroup.c if (l == it->cset_head) { l 4421 kernel/cgroup/cgroup.c cset = container_of(l, struct css_set, e_cset_node[it->ss->id]); l 4423 kernel/cgroup/cgroup.c link = list_entry(l, struct cgrp_cset_link, cset_link); l 4427 kernel/cgroup/cgroup.c it->cset_pos = l; l 622 kernel/events/uprobes.c static int match_uprobe(struct uprobe *l, struct uprobe *r) l 624 kernel/events/uprobes.c if (l->inode < r->inode) l 627 kernel/events/uprobes.c if (l->inode > r->inode) l 630 kernel/events/uprobes.c if (l->offset < r->offset) l 633 kernel/events/uprobes.c if (l->offset > r->offset) l 107 kernel/irq/affinity.c static int ncpus_cmp_func(const void *l, const void *r) l 109 kernel/irq/affinity.c const struct node_vectors *ln = l; l 357 kernel/locking/lockdep_proc.c static int lock_stat_cmp(const void *l, const void *r) l 359 kernel/locking/lockdep_proc.c const struct lock_stat_data *dl = l, *dr = r; l 32 kernel/locking/mcs_spinlock.h #define arch_mcs_spin_lock_contended(l) \ l 34 kernel/locking/mcs_spinlock.h smp_cond_load_acquire(l, VAL); \ l 44 kernel/locking/mcs_spinlock.h #define arch_mcs_spin_unlock_contended(l) \ l 45 kernel/locking/mcs_spinlock.h 
smp_store_release((l), 1) l 80 kernel/locking/qspinlock_paravirt.h #define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l) l 144 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c) l 145 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c) l 146 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c) l 205 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_relaxed(l,c,n) (0) l 206 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_acquire(l,c,n) (0) l 207 kernel/locking/rtmutex.c # define rt_mutex_cmpxchg_release(l,c,n) (0) l 14 kernel/locking/rtmutex.h #define rt_mutex_deadlock_check(l) (0) l 17 kernel/locking/rtmutex.h #define debug_rt_mutex_lock(l) do { } while (0) l 18 kernel/locking/rtmutex.h #define debug_rt_mutex_proxy_lock(l,p) do { } while (0) l 19 kernel/locking/rtmutex.h #define debug_rt_mutex_proxy_unlock(l) do { } while (0) l 20 kernel/locking/rtmutex.h #define debug_rt_mutex_unlock(l) do { } while (0) l 22 kernel/locking/rtmutex.h #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) l 151 kernel/locking/rtmutex_common.h extern int rt_mutex_futex_trylock(struct rt_mutex *l); l 152 kernel/locking/rtmutex_common.h extern int __rt_mutex_futex_trylock(struct rt_mutex *l); l 49 kernel/locking/spinlock.c # define arch_read_relax(l) cpu_relax() l 52 kernel/locking/spinlock.c # define arch_write_relax(l) cpu_relax() l 55 kernel/locking/spinlock.c # define arch_spin_relax(l) cpu_relax() l 1183 kernel/module.c size_t l = 0; l 1188 kernel/module.c buf[l++] = taint_flags[i].c_true; l 1191 kernel/module.c return l; l 1252 kernel/module.c size_t l; l 1254 kernel/module.c l = module_flags_taint(mk->mod, buffer); l 1255 kernel/module.c buffer[l++] = '\n'; l 1256 kernel/module.c return l; l 3201 kernel/printk/printk.c size_t l = 0; l 3218 kernel/printk/printk.c l = msg_print_text(msg, syslog, printk_time, line, size); l 3225 kernel/printk/printk.c *len = l; l 3287 kernel/printk/printk.c size_t l = 0; l 3313 kernel/printk/printk.c l += msg_print_text(msg, true, time, NULL, 0); l 3321 kernel/printk/printk.c while (l >= size && seq < dumper->next_seq) { l 3324 kernel/printk/printk.c l -= msg_print_text(msg, true, time, NULL, 0); l 3333 kernel/printk/printk.c l = 0; l 3337 kernel/printk/printk.c l += msg_print_text(msg, syslog, time, buf + l, size - l); l 3348 kernel/printk/printk.c *len = l; l 92 kernel/resource.c loff_t l = 0; l 94 kernel/resource.c for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) l 1536 kernel/resource.c loff_t l; l 1539 kernel/resource.c for (p = p->child; p ; p = r_next(NULL, p, &l)) { l 1586 kernel/resource.c loff_t l; l 1595 kernel/resource.c for (p = p->child; p ; p = r_next(NULL, p, &l)) { l 28 kernel/sched/cpudeadline.c int l, r, largest; l 40 kernel/sched/cpudeadline.c l = left_child(idx); l 45 kernel/sched/cpudeadline.c if ((l < cp->size) && dl_time_before(orig_dl, l 46 kernel/sched/cpudeadline.c cp->elements[l].dl)) { l 47 kernel/sched/cpudeadline.c largest = l; l 48 kernel/sched/cpudeadline.c largest_dl = cp->elements[l].dl; l 3150 kernel/trace/ftrace.c loff_t l; l 3161 kernel/trace/ftrace.c for (l = 0; l <= (*pos - iter->mod_pos); ) { l 3162 kernel/trace/ftrace.c p = t_probe_next(m, &l); l 3225 kernel/trace/ftrace.c loff_t l; l 3236 kernel/trace/ftrace.c for (l = 0; l <= (*pos - iter->func_pos); ) { l 3237 kernel/trace/ftrace.c p = t_mod_next(m, &l); l 3316 kernel/trace/ftrace.c loff_t l = 
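The kernel/bpf/helpers.c fallback listed just above spins while the lock word reads non-zero, grabs it with an atomic exchange, and releases it with a plain release store (atomic_set_release(l, 0)). A hedged C11-atomics analogue of that test-and-set lock, as a userspace sketch rather than the kernel's arch_spinlock_t path:

  #include <stdatomic.h>

  typedef atomic_uint tas_lock_t;

  static void tas_lock(tas_lock_t *l)
  {
      do {
          /* spin (relaxed) until the lock word looks free... */
          while (atomic_load_explicit(l, memory_order_relaxed))
              ;
          /* ...then try to take it; exchange returning 0 means we won */
      } while (atomic_exchange_explicit(l, 1, memory_order_acquire));
  }

  static void tas_unlock(tas_lock_t *l)
  {
      /* mirrors atomic_set_release(l, 0) in the listing */
      atomic_store_explicit(l, 0, memory_order_release);
  }
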
*pos; /* t_probe_start() must use original pos */ l 3331 kernel/trace/ftrace.c return t_mod_start(m, &l); l 3337 kernel/trace/ftrace.c return t_mod_start(m, &l); l 3353 kernel/trace/ftrace.c loff_t l; l 3392 kernel/trace/ftrace.c for (l = 0; l <= *pos; ) { l 3393 kernel/trace/ftrace.c p = t_func_next(m, &l); l 464 kernel/trace/trace.c loff_t l = 0; l 471 kernel/trace/trace.c for (pid++; pid && l < *pos; l 472 kernel/trace/trace.c pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) l 3448 kernel/trace/trace.c loff_t l = 0; l 3482 kernel/trace/trace.c for (p = iter; p && l < *pos; p = s_next(m, p, &l)) l 3493 kernel/trace/trace.c l = *pos - 1; l 3494 kernel/trace/trace.c p = s_next(m, p, &l); l 4339 kernel/trace/trace.c loff_t l = 0; l 4344 kernel/trace/trace.c for (; t && l < *pos; t = t_next(m, t, &l)) l 5049 kernel/trace/trace.c loff_t l = 0; l 5055 kernel/trace/trace.c while (l <= *pos) { l 5056 kernel/trace/trace.c v = saved_tgids_next(m, v, &l); l 5125 kernel/trace/trace.c loff_t l = 0; l 5131 kernel/trace/trace.c while (l <= *pos) { l 5132 kernel/trace/trace.c v = saved_cmdlines_next(m, v, &l); l 5293 kernel/trace/trace.c loff_t l = 0; l 5301 kernel/trace/trace.c while (v && l < *pos) { l 5302 kernel/trace/trace.c v = eval_map_next(m, v, &l); l 301 kernel/trace/trace_branch.c int l; l 308 kernel/trace/trace_branch.c l = snprintf(NULL, 0, "/%lu", p->constant); l 309 kernel/trace/trace_branch.c l = l > 8 ? 0 : 8 - l; l 312 kernel/trace/trace_branch.c p->data.correct, p->constant, l, p->data.incorrect); l 923 kernel/trace/trace_events.c loff_t l; l 928 kernel/trace/trace_events.c for (l = 0; l <= *pos; ) { l 929 kernel/trace/trace_events.c file = t_next(m, file, &l); l 956 kernel/trace/trace_events.c loff_t l; l 961 kernel/trace/trace_events.c for (l = 0; l <= *pos; ) { l 962 kernel/trace/trace_events.c file = s_next(m, file, &l); l 1268 kernel/trace/trace_events.c loff_t l = 0; l 1275 kernel/trace/trace_events.c while (l < *pos && p) l 1276 kernel/trace/trace_events.c p = f_next(m, p, &l); l 219 kernel/trace/trace_stat.c struct stat_node *l = container_of(v, struct stat_node, node); l 224 kernel/trace/trace_stat.c return session->ts->stat_show(s, l->stat); l 126 lib/bch.c const int l = BCH_ECC_WORDS(bch)-1; l 129 lib/bch.c p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff); l 131 lib/bch.c for (i = 0; i < l; i++) l 134 lib/bch.c ecc[l] = (ecc[l] << 8)^(*p); l 193 lib/bch.c const unsigned int l = BCH_ECC_WORDS(bch)-1; l 199 lib/bch.c const uint32_t * const tab1 = tab0 + 256*(l+1); l 200 lib/bch.c const uint32_t * const tab2 = tab1 + 256*(l+1); l 201 lib/bch.c const uint32_t * const tab3 = tab2 + 256*(l+1); l 244 lib/bch.c p0 = tab0 + (l+1)*((w >> 0) & 0xff); l 245 lib/bch.c p1 = tab1 + (l+1)*((w >> 8) & 0xff); l 246 lib/bch.c p2 = tab2 + (l+1)*((w >> 16) & 0xff); l 247 lib/bch.c p3 = tab3 + (l+1)*((w >> 24) & 0xff); l 249 lib/bch.c for (i = 0; i < l; i++) l 252 lib/bch.c r[l] = p0[l]^p1[l]^p2[l]^p3[l]; l 391 lib/bch.c unsigned int i, j, tmp, l, pd = 1, d = syn[0]; l 414 lib/bch.c l = a_log(bch, pelp->c[j]); l 415 lib/bch.c elp->c[j+k] ^= a_pow(bch, tmp+l); l 648 lib/bch.c int i, l, n = 0; l 667 lib/bch.c l = a_log(bch, f); l 668 lib/bch.c l += (l & 1) ? GF_N(bch) : 0; l 669 lib/bch.c e = a_pow(bch, l/2); l 677 lib/bch.c d = a_pow(bch, 2*l)^gf_mul(bch, b, f)^d; l 712 lib/bch.c int i, d = a->deg, l = GF_N(bch)-a_log(bch, a->c[a->deg]); l 716 lib/bch.c rep[i] = a->c[i] ? 
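The kernel/trace/trace_branch.c lines above call snprintf(NULL, 0, ...) only to measure how wide a number would print, then turn the answer into a padding width. A tiny standalone illustration of that measuring trick (the value and the 8-column budget are made-up examples):

  #include <stdio.h>

  int main(void)
  {
      unsigned long constant = 42;     /* example value */
      int l, pad;

      /* snprintf with a NULL buffer and size 0 only reports the length */
      l = snprintf(NULL, 0, "/%lu", constant);
      pad = l > 8 ? 0 : 8 - l;         /* columns left in an 8-wide field */

      printf("correct/%lu%*s|\n", constant, pad, "");
      return 0;
  }
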
mod_s(bch, a_log(bch, a->c[i])+l) : -1; l 1093 lib/bch.c const int l = BCH_ECC_WORDS(bch); l 1097 lib/bch.c memset(bch->mod8_tab, 0, 4*256*l*sizeof(*bch->mod8_tab)); l 1103 lib/bch.c tab = bch->mod8_tab + (b*256+i)*l; l 147 lib/decompress_inflate.c long l = strm->next_out - out_buf; l 148 lib/decompress_inflate.c if (l != flush(out_buf, l)) { l 43 lib/decompress_unlzo.c int l; l 58 lib/decompress_unlzo.c for (l = 0; l < 9; l++) { l 59 lib/decompress_unlzo.c if (*parse++ != lzop_magic[l]) l 88 lib/decompress_unlzo.c l = *parse++; l 90 lib/decompress_unlzo.c if (end - parse < l + 4) l 92 lib/decompress_unlzo.c parse += l + 4; l 74 lib/digsig.c unsigned nret, l; l 154 lib/digsig.c p = mpi_get_buffer(res, &l, NULL); l 161 lib/digsig.c head = len - l; l 163 lib/digsig.c memcpy(out1 + head, p, l); l 82 lib/find_bit_benchmark.c unsigned long l, cnt = 0; l 88 lib/find_bit_benchmark.c l = find_last_bit(bitmap, len); l 89 lib/find_bit_benchmark.c if (l >= len) l 91 lib/find_bit_benchmark.c len = l; l 344 lib/inflate.c int l; /* bits per table (returned in m) */ l 393 lib/inflate.c l = *m; l 398 lib/inflate.c if ((unsigned)l < j) l 399 lib/inflate.c l = j; l 404 lib/inflate.c if ((unsigned)l > i) l 405 lib/inflate.c l = i; l 406 lib/inflate.c *m = l; l 447 lib/inflate.c w = -l; /* bits decoded == (l * h) */ l 463 lib/inflate.c while (k > w + l) l 467 lib/inflate.c w += l; /* previous table always l bits */ l 470 lib/inflate.c z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */ l 507 lib/inflate.c r.b = (uch)l; /* bits to dump before this table */ l 510 lib/inflate.c j = i >> (w - l); /* (get around Turbo C bug) */ l 548 lib/inflate.c w -= l; l 775 lib/inflate.c unsigned *l; /* length list for huft_build */ l 779 lib/inflate.c l = malloc(sizeof(*l) * 288); l 780 lib/inflate.c if (l == NULL) l 785 lib/inflate.c l[i] = 8; l 787 lib/inflate.c l[i] = 9; l 789 lib/inflate.c l[i] = 7; l 791 lib/inflate.c l[i] = 8; l 793 lib/inflate.c if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) { l 794 lib/inflate.c free(l); l 800 lib/inflate.c l[i] = 5; l 802 lib/inflate.c if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1) l 805 lib/inflate.c free(l); l 814 lib/inflate.c free(l); l 819 lib/inflate.c free(l); l 834 lib/inflate.c unsigned l; /* last length */ l 914 lib/inflate.c i = l = 0; l 922 lib/inflate.c ll[i++] = l = j; /* save last length in l */ l 933 lib/inflate.c ll[i++] = l; l 946 lib/inflate.c l = 0; l 959 lib/inflate.c l = 0; l 94 lib/kfifo.c unsigned int l; l 102 lib/kfifo.c l = min(len, size - off); l 104 lib/kfifo.c memcpy(fifo->data + off, src, l); l 105 lib/kfifo.c memcpy(fifo->data, src + l, len - l); l 116 lib/kfifo.c unsigned int l; l 118 lib/kfifo.c l = kfifo_unused(fifo); l 119 lib/kfifo.c if (len > l) l 120 lib/kfifo.c len = l; l 133 lib/kfifo.c unsigned int l; l 141 lib/kfifo.c l = min(len, size - off); l 143 lib/kfifo.c memcpy(dst, fifo->data + off, l); l 144 lib/kfifo.c memcpy(dst + l, fifo->data, len - l); l 155 lib/kfifo.c unsigned int l; l 157 lib/kfifo.c l = fifo->in - fifo->out; l 158 lib/kfifo.c if (len > l) l 159 lib/kfifo.c len = l; l 181 lib/kfifo.c unsigned int l; l 190 lib/kfifo.c l = min(len, size - off); l 192 lib/kfifo.c ret = copy_from_user(fifo->data + off, from, l); l 194 lib/kfifo.c ret = DIV_ROUND_UP(ret + len - l, esize); l 196 lib/kfifo.c ret = copy_from_user(fifo->data, from + l, len - l); l 213 lib/kfifo.c unsigned int l; l 221 lib/kfifo.c l = kfifo_unused(fifo); l 222 lib/kfifo.c if (len > l) l 223 lib/kfifo.c len = l; l 239 
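The lib/kfifo.c entries show the classic two-memcpy ring-buffer copy: fill from the current offset up to the end of the buffer, then wrap to the start for whatever is left. A simplified power-of-two byte FIFO along those lines (a sketch, not the kernel's struct __kfifo; the caller is assumed to have already bounded len by the free space):

  #include <stddef.h>
  #include <string.h>

  struct byte_fifo {
      unsigned char *data;
      size_t size;        /* must be a power of two */
      size_t in, out;     /* free-running counters   */
  };

  /* Copy len bytes in at logical position f->in, wrapping at f->size. */
  static void fifo_copy_in(struct byte_fifo *f, const unsigned char *src,
                           size_t len)
  {
      size_t off = f->in & (f->size - 1);
      size_t l = len < f->size - off ? len : f->size - off;

      memcpy(f->data + off, src, l);          /* run up to the end  */
      memcpy(f->data, src + l, len - l);      /* wrapped remainder  */
      f->in += len;
  }
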
lib/kfifo.c unsigned int l; l 250 lib/kfifo.c l = min(len, size - off); l 252 lib/kfifo.c ret = copy_to_user(to, fifo->data + off, l); l 254 lib/kfifo.c ret = DIV_ROUND_UP(ret + len - l, esize); l 256 lib/kfifo.c ret = copy_to_user(to + l, fifo->data, len - l); l 273 lib/kfifo.c unsigned int l; l 281 lib/kfifo.c l = fifo->in - fifo->out; l 282 lib/kfifo.c if (len > l) l 283 lib/kfifo.c len = l; l 299 lib/kfifo.c unsigned int l; l 312 lib/kfifo.c l = 0; l 314 lib/kfifo.c while (len >= l + PAGE_SIZE - off) { l 317 lib/kfifo.c l += PAGE_SIZE; l 320 lib/kfifo.c if (page_to_phys(page) != page_to_phys(npage) - l) { l 321 lib/kfifo.c sg_set_page(sgl, page, l - off, off); l 326 lib/kfifo.c len -= l - off; l 327 lib/kfifo.c l = off = 0; l 339 lib/kfifo.c unsigned int l; l 348 lib/kfifo.c l = min(len, size - off); l 350 lib/kfifo.c n = setup_sgl_buf(sgl, fifo->data + off, nents, l); l 351 lib/kfifo.c n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); l 359 lib/kfifo.c unsigned int l; l 361 lib/kfifo.c l = kfifo_unused(fifo); l 362 lib/kfifo.c if (len > l) l 363 lib/kfifo.c len = l; l 372 lib/kfifo.c unsigned int l; l 374 lib/kfifo.c l = fifo->in - fifo->out; l 375 lib/kfifo.c if (len > l) l 376 lib/kfifo.c len = l; l 400 lib/kfifo.c unsigned int l; l 404 lib/kfifo.c l = __KFIFO_PEEK(data, fifo->out, mask); l 407 lib/kfifo.c l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8; l 409 lib/kfifo.c return l; l 309 lib/logic_pio.c BUILD_LOGIC_IO(l, u32) l 47 lib/lz4/lz4_decompress.c #define DEBUGLOG(l, ...) {} /* disabled */ l 311 lib/lzo/lzo1x_compress.c size_t l = in_len; l 328 lib/lzo/lzo1x_compress.c while (l > 20) { l 329 lib/lzo/lzo1x_compress.c size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1); l 339 lib/lzo/lzo1x_compress.c l -= ll; l 341 lib/lzo/lzo1x_compress.c t += l; l 19 lib/math/lcm.c unsigned long l = lcm(a, b); l 21 lib/math/lcm.c if (l) l 22 lib/math/lcm.c return l; l 17 lib/math/reciprocal_div.c int l; l 19 lib/math/reciprocal_div.c l = fls(d - 1); l 20 lib/math/reciprocal_div.c m = ((1ULL << 32) * ((1ULL << l) - d)); l 24 lib/math/reciprocal_div.c R.sh1 = min(l, 1); l 25 lib/math/reciprocal_div.c R.sh2 = max(l - 1, 0); l 34 lib/math/reciprocal_div.c u32 l, post_shift; l 38 lib/math/reciprocal_div.c l = fls(d - 1); l 43 lib/math/reciprocal_div.c WARN(l == 32, l 46 lib/math/reciprocal_div.c post_shift = l; l 47 lib/math/reciprocal_div.c mlow = 1ULL << (32 + l); l 49 lib/math/reciprocal_div.c mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec)); l 64 lib/math/reciprocal_div.c R.exp = l; l 440 lib/mpi/longlong.h #define rshift_rhlc(r, h, l, c) \ l 443 lib/mpi/longlong.h "=r" (r) : "r" (h), "r" (l), "rn" (c)) l 499 lib/mpi/longlong.h #define rshift_rhlc(r, h, l, c) \ l 504 lib/mpi/longlong.h __nn.__i.__h = (h); __nn.__i.__l = (l); \ l 27 lib/percpu_test.c long l = 0; l 34 lib/percpu_test.c l += -1; l 36 lib/percpu_test.c CHECK(l, long_counter, -1); l 38 lib/percpu_test.c l += 1; l 40 lib/percpu_test.c CHECK(l, long_counter, 0); l 65 lib/percpu_test.c l += -ui_one; l 67 lib/percpu_test.c CHECK(l, long_counter, 0xffffffff); l 69 lib/percpu_test.c l += ui_one; l 71 lib/percpu_test.c CHECK(l, long_counter, (long)0x100000000LL); l 74 lib/percpu_test.c l = 0; l 77 lib/percpu_test.c l -= ui_one; l 79 lib/percpu_test.c CHECK(l, long_counter, -1); l 81 lib/percpu_test.c l = 0; l 84 lib/percpu_test.c l += ui_one; l 86 lib/percpu_test.c CHECK(l, long_counter, 1); l 88 lib/percpu_test.c l += -ui_one; l 90 lib/percpu_test.c CHECK(l, long_counter, (long)0x100000000LL); l 92 
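The lib/math/reciprocal_div.c lines compute a magic multiplier from fls(d - 1) so that later divisions by a fixed 32-bit d become a multiply plus two shifts. A self-contained sketch of that scheme; the rounding of m and the multiply-shift divide step are not in the listing and are filled in here from the usual form of the algorithm, so treat them as assumptions:

  #include <assert.h>
  #include <stdint.h>

  struct reciprocal_value { uint32_t m; uint8_t sh1, sh2; };

  static int fls32(uint32_t x)    /* highest set bit, 1-based; 0 for x == 0 */
  {
      return x ? 32 - __builtin_clz(x) : 0;
  }

  static struct reciprocal_value reciprocal_value(uint32_t d)
  {
      struct reciprocal_value R;
      int l = fls32(d - 1);
      /* assumed rounding step: divide by d and add one */
      uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

      R.m = (uint32_t)m;
      R.sh1 = l > 1 ? 1 : l;       /* min(l, 1)     */
      R.sh2 = l > 1 ? l - 1 : 0;   /* max(l - 1, 0) */
      return R;
  }

  /* assumed divide step: multiply-high plus two shifts */
  static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
  {
      uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

      return (t + ((a - t) >> R.sh1)) >> R.sh2;
  }

  int main(void)
  {
      struct reciprocal_value R = reciprocal_value(7);

      assert(reciprocal_divide(100, R) == 100 / 7);
      return 0;
  }
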
lib/percpu_test.c l = 0; l 95 lib/percpu_test.c l -= ui_one; l 97 lib/percpu_test.c CHECK(l, long_counter, -1); l 98 lib/percpu_test.c CHECK(l, long_counter, ULONG_MAX); l 73 lib/test_hexdump.c size_t l = len; l 81 lib/test_hexdump.c if (l > rs) l 82 lib/test_hexdump.c l = rs; l 98 lib/test_hexdump.c for (i = 0; i < l / gs; i++) { l 116 lib/test_hexdump.c strncpy(p, data_a, l); l 117 lib/test_hexdump.c p += l; l 619 lib/xz/xz_dec_lzma2.c static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, l 625 lib/xz/xz_dec_lzma2.c if (!rc_bit(&s->rc, &l->choice)) { l 626 lib/xz/xz_dec_lzma2.c probs = l->low[pos_state]; l 630 lib/xz/xz_dec_lzma2.c if (!rc_bit(&s->rc, &l->choice2)) { l 631 lib/xz/xz_dec_lzma2.c probs = l->mid[pos_state]; l 635 lib/xz/xz_dec_lzma2.c probs = l->high; l 74 mm/list_lru.c struct list_lru_one *l = &nlru->lru; l 84 mm/list_lru.c l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); l 88 mm/list_lru.c return l; l 130 mm/list_lru.c struct list_lru_one *l; l 134 mm/list_lru.c l = list_lru_from_kmem(nlru, item, &memcg); l 135 mm/list_lru.c list_add_tail(item, &l->list); l 137 mm/list_lru.c if (!l->nr_items++) l 153 mm/list_lru.c struct list_lru_one *l; l 157 mm/list_lru.c l = list_lru_from_kmem(nlru, item, NULL); l 159 mm/list_lru.c l->nr_items--; l 188 mm/list_lru.c struct list_lru_one *l; l 192 mm/list_lru.c l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); l 193 mm/list_lru.c count = l->nr_items; l 215 mm/list_lru.c struct list_lru_one *l; l 219 mm/list_lru.c l = list_lru_from_memcg_idx(nlru, memcg_idx); l 221 mm/list_lru.c list_for_each_safe(item, n, &l->list) { l 232 mm/list_lru.c ret = isolate(item, l, &nlru->lock, cb_arg); l 249 mm/list_lru.c list_move_tail(item, &l->list); l 325 mm/list_lru.c static void init_one_lru(struct list_lru_one *l) l 327 mm/list_lru.c INIT_LIST_HEAD(&l->list); l 328 mm/list_lru.c l->nr_items = 0; l 347 mm/list_lru.c struct list_lru_one *l; l 349 mm/list_lru.c l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL); l 350 mm/list_lru.c if (!l) l 353 mm/list_lru.c init_one_lru(l); l 354 mm/list_lru.c memcg_lrus->lru[i] = l; l 4483 mm/memory.c int i, n, base, l; l 4493 mm/memory.c l = n; l 4502 mm/memory.c l = pages_per_huge_page - n; l 4513 mm/memory.c for (i = 0; i < l; i++) { l 4515 mm/memory.c int right_idx = base + 2 * l - 1 - i; l 168 mm/migrate.c void putback_movable_pages(struct list_head *l) l 173 mm/migrate.c list_for_each_entry_safe(page, page2, l, lru) { l 2042 mm/slub.c enum slab_modes l = M_NONE, m = M_NONE; l 2138 mm/slub.c if (l != m) { l 2139 mm/slub.c if (l == M_PARTIAL) l 2141 mm/slub.c else if (l == M_FULL) l 2150 mm/slub.c l = m; l 4521 mm/slub.c struct location *l; l 4526 mm/slub.c l = (void *)__get_free_pages(flags, order); l 4527 mm/slub.c if (!l) l 4531 mm/slub.c memcpy(l, t->loc, sizeof(struct location) * t->count); l 4535 mm/slub.c t->loc = l; l 4543 mm/slub.c struct location *l; l 4563 mm/slub.c l = &t->loc[pos]; l 4564 mm/slub.c l->count++; l 4566 mm/slub.c l->sum_time += age; l 4567 mm/slub.c if (age < l->min_time) l 4568 mm/slub.c l->min_time = age; l 4569 mm/slub.c if (age > l->max_time) l 4570 mm/slub.c l->max_time = age; l 4572 mm/slub.c if (track->pid < l->min_pid) l 4573 mm/slub.c l->min_pid = track->pid; l 4574 mm/slub.c if (track->pid > l->max_pid) l 4575 mm/slub.c l->max_pid = track->pid; l 4578 mm/slub.c to_cpumask(l->cpus)); l 4580 mm/slub.c node_set(page_to_nid(virt_to_page(track)), l->nodes); l 4596 mm/slub.c l = t->loc + pos; l 4598 mm/slub.c memmove(l + 1, l, l 4601 mm/slub.c 
l->count = 1; l 4602 mm/slub.c l->addr = track->addr; l 4603 mm/slub.c l->sum_time = age; l 4604 mm/slub.c l->min_time = age; l 4605 mm/slub.c l->max_time = age; l 4606 mm/slub.c l->min_pid = track->pid; l 4607 mm/slub.c l->max_pid = track->pid; l 4608 mm/slub.c cpumask_clear(to_cpumask(l->cpus)); l 4609 mm/slub.c cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); l 4610 mm/slub.c nodes_clear(l->nodes); l 4611 mm/slub.c node_set(page_to_nid(virt_to_page(track)), l->nodes); l 4664 mm/slub.c struct location *l = &t.loc[i]; l 4668 mm/slub.c len += sprintf(buf + len, "%7ld ", l->count); l 4670 mm/slub.c if (l->addr) l 4671 mm/slub.c len += sprintf(buf + len, "%pS", (void *)l->addr); l 4675 mm/slub.c if (l->sum_time != l->min_time) { l 4677 mm/slub.c l->min_time, l 4678 mm/slub.c (long)div_u64(l->sum_time, l->count), l 4679 mm/slub.c l->max_time); l 4682 mm/slub.c l->min_time); l 4684 mm/slub.c if (l->min_pid != l->max_pid) l 4686 mm/slub.c l->min_pid, l->max_pid); l 4689 mm/slub.c l->min_pid); l 4692 mm/slub.c !cpumask_empty(to_cpumask(l->cpus)) && l 4696 mm/slub.c cpumask_pr_args(to_cpumask(l->cpus))); l 4698 mm/slub.c if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && l 4702 mm/slub.c nodemask_pr_args(&l->nodes)); l 2713 mm/swapfile.c loff_t l = *pos; l 2717 mm/swapfile.c if (!l) l 2723 mm/swapfile.c if (!--l) l 1725 mm/vmstat.c unsigned long *l = arg; l 1726 mm/vmstat.c unsigned long off = l - (unsigned long *)m->private; l 1729 mm/vmstat.c seq_put_decimal_ull(m, " ", *l); l 667 mm/z3fold.c struct list_head *l = &unbuddied[i]; l 669 mm/z3fold.c zhdr = list_first_entry_or_null(READ_ONCE(l), l 677 mm/z3fold.c l = &unbuddied[i]; l 678 mm/z3fold.c if (unlikely(zhdr != list_first_entry(READ_ONCE(l), l 718 mm/z3fold.c struct list_head *l; l 722 mm/z3fold.c l = &unbuddied[chunks]; l 724 mm/z3fold.c zhdr = list_first_entry_or_null(READ_ONCE(l), l 30 net/appletalk/atalk_proc.c loff_t l = *pos; l 33 net/appletalk/atalk_proc.c return l ? atalk_get_interface_idx(--l) : SEQ_START_TOKEN; l 91 net/appletalk/atalk_proc.c loff_t l = *pos; l 94 net/appletalk/atalk_proc.c return l ? atalk_get_route_idx(--l) : SEQ_START_TOKEN; l 833 net/atm/lec.c loff_t *l) l 841 net/atm/lec.c --*l; l 845 net/atm/lec.c if (--*l < 0) l 850 net/atm/lec.c return (*l < 0) ? 
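The mm/slub.c location-tracking lines above fold every new sample into a running count/sum/min/max record, and the first hit at a location initialises all fields from that sample. A tiny generic accumulator in the same shape, with field names mirroring the listing rather than the real struct location:

  #include <stdint.h>

  struct loc_stats {
      unsigned long count;
      uint64_t sum_time;
      uint64_t min_time, max_time;
  };

  /* First sample at this location: every field starts from the sample. */
  static void loc_stats_init(struct loc_stats *l, uint64_t age)
  {
      l->count = 1;
      l->sum_time = l->min_time = l->max_time = age;
  }

  /* Fold one more sample into the record; mean is sum_time / count. */
  static void loc_stats_add(struct loc_stats *l, uint64_t age)
  {
      l->count++;
      l->sum_time += age;
      if (age < l->min_time)
          l->min_time = age;
      if (age > l->max_time)
          l->max_time = age;
  }
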
state : NULL; l 853 net/atm/lec.c static void *lec_arp_walk(struct lec_state *state, loff_t *l, l 860 net/atm/lec.c v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l); l 868 net/atm/lec.c static void *lec_misc_walk(struct lec_state *state, loff_t *l, l 880 net/atm/lec.c v = lec_tbl_walk(state, lec_misc_tables[q], l); l 888 net/atm/lec.c static void *lec_priv_walk(struct lec_state *state, loff_t *l, l 895 net/atm/lec.c if (!lec_arp_walk(state, l, priv) && !lec_misc_walk(state, l, priv)) { l 904 net/atm/lec.c static void *lec_itf_walk(struct lec_state *state, loff_t *l) l 911 net/atm/lec.c lec_priv_walk(state, l, netdev_priv(dev)) : NULL; l 921 net/atm/lec.c static void *lec_get_idx(struct lec_state *state, loff_t l) l 926 net/atm/lec.c v = lec_itf_walk(state, &l); l 109 net/atm/mpoa_proc.c loff_t l = *pos; l 112 net/atm/mpoa_proc.c if (!l--) l 115 net/atm/mpoa_proc.c if (!l--) l 78 net/atm/proc.c static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l) l 90 net/atm/proc.c l--; l 94 net/atm/proc.c l -= compare_family(sk, family); l 95 net/atm/proc.c if (l < 0) l 105 net/atm/proc.c return (l < 0); l 108 net/atm/proc.c static inline void *vcc_walk(struct seq_file *seq, loff_t l) l 113 net/atm/proc.c return __vcc_walk(&state->sk, family, &state->bucket, l) ? l 26 net/batman-adv/send.h bool batadv_forw_packet_steal(struct batadv_forw_packet *packet, spinlock_t *l); l 141 net/bluetooth/af_bluetooth.c void bt_sock_link(struct bt_sock_list *l, struct sock *sk) l 143 net/bluetooth/af_bluetooth.c write_lock(&l->lock); l 144 net/bluetooth/af_bluetooth.c sk_add_node(sk, &l->head); l 145 net/bluetooth/af_bluetooth.c write_unlock(&l->lock); l 149 net/bluetooth/af_bluetooth.c void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) l 151 net/bluetooth/af_bluetooth.c write_lock(&l->lock); l 153 net/bluetooth/af_bluetooth.c write_unlock(&l->lock); l 611 net/bluetooth/af_bluetooth.c __acquires(seq->private->l->lock) l 613 net/bluetooth/af_bluetooth.c struct bt_sock_list *l = PDE_DATA(file_inode(seq->file)); l 615 net/bluetooth/af_bluetooth.c read_lock(&l->lock); l 616 net/bluetooth/af_bluetooth.c return seq_hlist_start_head(&l->head, *pos); l 621 net/bluetooth/af_bluetooth.c struct bt_sock_list *l = PDE_DATA(file_inode(seq->file)); l 623 net/bluetooth/af_bluetooth.c return seq_hlist_next(v, &l->head, pos); l 627 net/bluetooth/af_bluetooth.c __releases(seq->private->l->lock) l 629 net/bluetooth/af_bluetooth.c struct bt_sock_list *l = PDE_DATA(file_inode(seq->file)); l 631 net/bluetooth/af_bluetooth.c read_unlock(&l->lock); l 636 net/bluetooth/af_bluetooth.c struct bt_sock_list *l = PDE_DATA(file_inode(seq->file)); l 641 net/bluetooth/af_bluetooth.c if (l->custom_seq_show) { l 643 net/bluetooth/af_bluetooth.c l->custom_seq_show(seq, v); l 661 net/bluetooth/af_bluetooth.c if (l->custom_seq_show) { l 663 net/bluetooth/af_bluetooth.c l->custom_seq_show(seq, v); l 1702 net/bluetooth/l2cap_core.c struct l2cap_chan *chan, *l; l 1731 net/bluetooth/l2cap_core.c list_for_each_entry_safe(chan, l, &conn->chan_l, list) { l 220 net/caif/cfcnfg.c struct cfctrl_link_param *l) l 226 net/caif/cfcnfg.c memset(l, 0, sizeof(*l)); l 228 net/caif/cfcnfg.c l->priority = CAIF_PRIO_MAX - s->priority + 1; l 234 net/caif/cfcnfg.c l->phyid = res; l 249 net/caif/cfcnfg.c l->phyid = dev_info->id; l 253 net/caif/cfcnfg.c l->linktype = CFCTRL_SRV_VEI; l 254 net/caif/cfcnfg.c l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3; l 255 net/caif/cfcnfg.c l->chtype = s->sockaddr.u.at.type & 0x3; l 258 net/caif/cfcnfg.c 
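bt_sock_link()/bt_sock_unlink() above (and the near-identical iucv and nfc variants later in the listing) simply take a writer lock around adding or removing a socket from a shared list. A hedged userspace sketch of that pattern with a pthread rwlock and a hand-rolled singly linked list standing in for sk_add_node()/sk_del_node_init():

  #include <pthread.h>
  #include <stddef.h>

  struct sock_node {
      struct sock_node *next;
      /* payload would live here */
  };

  struct sock_list {
      pthread_rwlock_t lock;
      struct sock_node *head;
  };

  static void sock_link(struct sock_list *l, struct sock_node *sk)
  {
      pthread_rwlock_wrlock(&l->lock);
      sk->next = l->head;          /* head insertion, like sk_add_node() */
      l->head = sk;
      pthread_rwlock_unlock(&l->lock);
  }

  static void sock_unlink(struct sock_list *l, struct sock_node *sk)
  {
      struct sock_node **pp;

      pthread_rwlock_wrlock(&l->lock);
      for (pp = &l->head; *pp; pp = &(*pp)->next)
          if (*pp == sk) {
              *pp = sk->next;
              break;
          }
      pthread_rwlock_unlock(&l->lock);
  }
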
l->linktype = CFCTRL_SRV_DATAGRAM; l 259 net/caif/cfcnfg.c l->chtype = 0x00; l 260 net/caif/cfcnfg.c l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; l 263 net/caif/cfcnfg.c l->linktype = CFCTRL_SRV_DATAGRAM; l 264 net/caif/cfcnfg.c l->chtype = 0x03; l 265 net/caif/cfcnfg.c l->endpoint = 0x00; l 266 net/caif/cfcnfg.c l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; l 269 net/caif/cfcnfg.c l->linktype = CFCTRL_SRV_RFM; l 270 net/caif/cfcnfg.c l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; l 271 net/caif/cfcnfg.c strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, l 272 net/caif/cfcnfg.c sizeof(l->u.rfm.volume)); l 275 net/caif/cfcnfg.c l->linktype = CFCTRL_SRV_UTIL; l 276 net/caif/cfcnfg.c l->endpoint = 0x00; l 277 net/caif/cfcnfg.c l->chtype = 0x00; l 278 net/caif/cfcnfg.c strlcpy(l->u.utility.name, s->sockaddr.u.util.service, l 279 net/caif/cfcnfg.c sizeof(l->u.utility.name)); l 280 net/caif/cfcnfg.c caif_assert(sizeof(l->u.utility.name) > 10); l 281 net/caif/cfcnfg.c l->u.utility.paramlen = s->param.size; l 282 net/caif/cfcnfg.c if (l->u.utility.paramlen > sizeof(l->u.utility.params)) l 283 net/caif/cfcnfg.c l->u.utility.paramlen = sizeof(l->u.utility.params); l 285 net/caif/cfcnfg.c memcpy(l->u.utility.params, s->param.data, l 286 net/caif/cfcnfg.c l->u.utility.paramlen); l 290 net/caif/cfcnfg.c l->linktype = CFCTRL_SRV_DBG; l 291 net/caif/cfcnfg.c l->endpoint = s->sockaddr.u.dbg.service; l 292 net/caif/cfcnfg.c l->chtype = s->sockaddr.u.dbg.type; l 49 net/caif/cfserl.c static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) l 51 net/caif/cfserl.c struct cfserl *layr = container_obj(l); l 207 net/ceph/crush/mapper.c int l; l 215 net/ceph/crush/mapper.c l = left(n); l 216 net/ceph/crush/mapper.c if (t < bucket->node_weights[l]) l 217 net/ceph/crush/mapper.c n = l; l 68 net/ceph/pagevec.c int l, bad; l 71 net/ceph/pagevec.c l = min_t(int, PAGE_SIZE-po, left); l 72 net/ceph/pagevec.c bad = copy_from_user(page_address(pages[i]) + po, data, l); l 73 net/ceph/pagevec.c if (bad == l) l 75 net/ceph/pagevec.c data += l - bad; l 76 net/ceph/pagevec.c left -= l - bad; l 77 net/ceph/pagevec.c po += l - bad; l 96 net/ceph/pagevec.c size_t l = min_t(size_t, PAGE_SIZE-po, left); l 98 net/ceph/pagevec.c memcpy(page_address(pages[i]) + po, data, l); l 99 net/ceph/pagevec.c data += l; l 100 net/ceph/pagevec.c left -= l; l 101 net/ceph/pagevec.c po += l; l 119 net/ceph/pagevec.c size_t l = min_t(size_t, PAGE_SIZE-po, left); l 121 net/ceph/pagevec.c memcpy(data, page_address(pages[i]) + po, l); l 122 net/ceph/pagevec.c data += l; l 123 net/ceph/pagevec.c left -= l; l 124 net/ceph/pagevec.c po += l; l 24 net/ceph/striper.c void ceph_calc_file_object_mapping(struct ceph_file_layout *l, l 28 net/ceph/striper.c u32 stripes_per_object = l->object_size / l->stripe_unit; l 37 net/ceph/striper.c blockno = div_u64_rem(off, l->stripe_unit, &blockoff); l 38 net/ceph/striper.c stripeno = div_u64_rem(blockno, l->stripe_count, &stripepos); l 41 net/ceph/striper.c *objno = objsetno * l->stripe_count + stripepos; l 42 net/ceph/striper.c *objoff = objsetpos * l->stripe_unit + blockoff; l 43 net/ceph/striper.c *xlen = min_t(u64, len, l->stripe_unit - blockoff); l 110 net/ceph/striper.c int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len, l 124 net/ceph/striper.c ceph_calc_file_object_mapping(l, off, len, &objno, &objoff, l 174 net/ceph/striper.c int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len, l 184 net/ceph/striper.c ceph_calc_file_object_mapping(l, 
off, len, &objno, &objoff, l 211 net/ceph/striper.c int ceph_extent_to_file(struct ceph_file_layout *l, l 216 net/ceph/striper.c u32 stripes_per_object = l->object_size / l->stripe_unit; l 231 net/ceph/striper.c *num_file_extents = DIV_ROUND_UP_ULL(objoff + objlen, l->stripe_unit) - l 232 net/ceph/striper.c DIV_ROUND_DOWN_ULL(objoff, l->stripe_unit); l 238 net/ceph/striper.c div_u64_rem(objoff, l->stripe_unit, &blockoff); l 242 net/ceph/striper.c objsetno = div_u64_rem(objno, l->stripe_count, &stripepos); l 243 net/ceph/striper.c stripeno = div_u64(objoff, l->stripe_unit) + l 245 net/ceph/striper.c blockno = stripeno * l->stripe_count + stripepos; l 246 net/ceph/striper.c off = blockno * l->stripe_unit + blockoff; l 247 net/ceph/striper.c len = min_t(u64, objlen, l->stripe_unit - blockoff); l 263 net/ceph/striper.c u64 ceph_get_num_objects(struct ceph_file_layout *l, u64 size) l 265 net/ceph/striper.c u64 period = (u64)l->stripe_count * l->object_size; l 272 net/ceph/striper.c remainder_bytes < (u64)l->stripe_count * l->stripe_unit) l 273 net/ceph/striper.c remainder_objs = l->stripe_count - l 274 net/ceph/striper.c DIV_ROUND_UP_ULL(remainder_bytes, l->stripe_unit); l 276 net/ceph/striper.c return num_periods * l->stripe_count - remainder_objs; l 2349 net/core/filter.c u32 new, i = 0, l = 0, space, copy = 0, offset = 0; l 2359 net/core/filter.c offset += l; l 2360 net/core/filter.c l = sk_msg_elem(msg, i)->length; l 2362 net/core/filter.c if (start < offset + l) l 2367 net/core/filter.c if (start >= offset + l) l 2510 net/core/filter.c u32 i = 0, l = 0, space, offset = 0; l 2520 net/core/filter.c offset += l; l 2521 net/core/filter.c l = sk_msg_elem(msg, i)->length; l 2523 net/core/filter.c if (start < offset + l) l 2529 net/core/filter.c if (start >= offset + l || last >= msg->sg.size) l 52 net/core/utils.c unsigned int l; l 56 net/core/utils.c l = 0; l 58 net/core/utils.c l <<= 8; l 66 net/core/utils.c l |= val; l 71 net/core/utils.c return htonl(l); l 83 net/ipv4/ah4.c int l = iph->ihl*4 - sizeof(struct iphdr); l 86 net/ipv4/ah4.c while (l > 0) { l 91 net/ipv4/ah4.c l--; l 96 net/ipv4/ah4.c if (optlen<2 || optlen>l) l 114 net/ipv4/ah4.c l -= optlen; l 357 net/ipv4/fib_trie.c struct key_vector *l; l 365 net/ipv4/fib_trie.c l = kv->kv; l 366 net/ipv4/fib_trie.c l->key = key; l 367 net/ipv4/fib_trie.c l->pos = 0; l 368 net/ipv4/fib_trie.c l->bits = 0; l 369 net/ipv4/fib_trie.c l->slen = fa->fa_slen; l 372 net/ipv4/fib_trie.c INIT_HLIST_HEAD(&l->leaf); l 373 net/ipv4/fib_trie.c hlist_add_head(&fa->fa_list, &l->leaf); l 375 net/ipv4/fib_trie.c return l; l 1017 net/ipv4/fib_trie.c struct key_vector *n, *l; l 1019 net/ipv4/fib_trie.c l = leaf_new(key, new); l 1020 net/ipv4/fib_trie.c if (!l) l 1053 net/ipv4/fib_trie.c NODE_INIT_PARENT(l, tp); l 1054 net/ipv4/fib_trie.c put_child_root(tp, key, l); l 1059 net/ipv4/fib_trie.c node_free(l); l 1068 net/ipv4/fib_trie.c struct key_vector *l, struct fib_alias *new, l 1071 net/ipv4/fib_trie.c if (!l) l 1079 net/ipv4/fib_trie.c hlist_for_each_entry(last, &l->leaf, fa_list) { l 1091 net/ipv4/fib_trie.c hlist_add_head_rcu(&new->fa_list, &l->leaf); l 1095 net/ipv4/fib_trie.c if (l->slen < new->fa_slen) { l 1096 net/ipv4/fib_trie.c l->slen = new->fa_slen; l 1126 net/ipv4/fib_trie.c struct key_vector *l, *tp; l 1148 net/ipv4/fib_trie.c l = fib_find_node(t, &tp, key); l 1149 net/ipv4/fib_trie.c fa = l ? 
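The net/ceph/striper.c entries above map a file offset to an (object number, object offset, usable length) triple by splitting the offset into stripe blocks and stripes. The listing does not show the object-set split, so the stripeno / stripes_per_object step below is an assumption about the missing line; the rest follows the listed arithmetic:

  #include <stdint.h>
  #include <stdio.h>

  struct layout {            /* stands in for struct ceph_file_layout */
      uint32_t stripe_unit;  /* bytes per stripe block */
      uint32_t stripe_count; /* objects striped across */
      uint32_t object_size;  /* bytes per object       */
  };

  static void file_to_object(const struct layout *l, uint64_t off, uint64_t len,
                             uint64_t *objno, uint64_t *objoff, uint32_t *xlen)
  {
      uint32_t stripes_per_object = l->object_size / l->stripe_unit;
      uint64_t blockno = off / l->stripe_unit;        /* which stripe block  */
      uint32_t blockoff = off % l->stripe_unit;       /* offset inside it    */
      uint64_t stripeno = blockno / l->stripe_count;  /* which full stripe   */
      uint32_t stripepos = blockno % l->stripe_count; /* column in stripe    */
      /* assumed: whole stripes group into object sets of stripes_per_object */
      uint64_t objsetno = stripeno / stripes_per_object;
      uint32_t objsetpos = stripeno % stripes_per_object;
      uint64_t run = l->stripe_unit - blockoff;       /* room left in block  */

      *objno = objsetno * l->stripe_count + stripepos;
      *objoff = (uint64_t)objsetpos * l->stripe_unit + blockoff;
      *xlen = (uint32_t)(len < run ? len : run);
  }

  int main(void)
  {
      struct layout l = { 4096, 4, 16384 };
      uint64_t objno, objoff;
      uint32_t xlen;

      file_to_object(&l, 70 * 1024, 8192, &objno, &objoff, &xlen);
      printf("objno=%llu objoff=%llu xlen=%u\n",
             (unsigned long long)objno, (unsigned long long)objoff, xlen);
      return 0;
  }
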
fib_find_alias(&l->leaf, slen, tos, fi->fib_priority, l 1275 net/ipv4/fib_trie.c err = fib_insert_alias(t, tp, l, new_fa, fa, key); l 1516 net/ipv4/fib_trie.c struct key_vector *l, struct fib_alias *old) l 1528 net/ipv4/fib_trie.c if (hlist_empty(&l->leaf)) { l 1529 net/ipv4/fib_trie.c if (tp->slen == l->slen) l 1531 net/ipv4/fib_trie.c put_child_root(tp, l->key, NULL); l 1532 net/ipv4/fib_trie.c node_free(l); l 1542 net/ipv4/fib_trie.c l->slen = fa->fa_slen; l 1552 net/ipv4/fib_trie.c struct key_vector *l, *tp; l 1563 net/ipv4/fib_trie.c l = fib_find_node(t, &tp, key); l 1564 net/ipv4/fib_trie.c if (!l) l 1567 net/ipv4/fib_trie.c fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id); l 1607 net/ipv4/fib_trie.c fib_remove_alias(t, tp, l, fa_to_delete); l 1736 net/ipv4/fib_trie.c struct key_vector *l, *tp = ot->kv; l 1751 net/ipv4/fib_trie.c while ((l = leaf_walk_rcu(&tp, key)) != NULL) { l 1754 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { l 1769 net/ipv4/fib_trie.c local_l = fib_find_node(lt, &local_tp, l->key); l 1772 net/ipv4/fib_trie.c NULL, l->key)) { l 1779 net/ipv4/fib_trie.c key = l->key + 1; l 1780 net/ipv4/fib_trie.c if (key < l->key) l 2018 net/ipv4/fib_trie.c static void fib_leaf_notify(struct net *net, struct key_vector *l, l 2023 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { l 2035 net/ipv4/fib_trie.c call_fib_entry_notifier(nb, net, FIB_EVENT_ENTRY_ADD, l->key, l 2044 net/ipv4/fib_trie.c struct key_vector *l, *tp = t->kv; l 2047 net/ipv4/fib_trie.c while ((l = leaf_walk_rcu(&tp, key)) != NULL) { l 2048 net/ipv4/fib_trie.c fib_leaf_notify(net, l, tb, nb); l 2050 net/ipv4/fib_trie.c key = l->key + 1; l 2052 net/ipv4/fib_trie.c if (key < l->key) l 2087 net/ipv4/fib_trie.c static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, l 2092 net/ipv4/fib_trie.c __be32 xkey = htonl(l->key); l 2105 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { l 2171 net/ipv4/fib_trie.c struct key_vector *l, *tp = t->kv; l 2184 net/ipv4/fib_trie.c while ((l = leaf_walk_rcu(&tp, key)) != NULL) { l 2187 net/ipv4/fib_trie.c err = fn_trie_dump_leaf(l, tb, skb, cb, filter); l 2195 net/ipv4/fib_trie.c key = l->key + 1; l 2201 net/ipv4/fib_trie.c if (key < l->key) l 2669 net/ipv4/fib_trie.c struct key_vector *l, **tp = &iter->tnode; l 2682 net/ipv4/fib_trie.c while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { l 2683 net/ipv4/fib_trie.c key = l->key + 1; l 2685 net/ipv4/fib_trie.c l = NULL; l 2692 net/ipv4/fib_trie.c if (l) l 2693 net/ipv4/fib_trie.c iter->key = l->key; /* remember it */ l 2697 net/ipv4/fib_trie.c return l; l 2729 net/ipv4/fib_trie.c struct key_vector *l = NULL; l 2736 net/ipv4/fib_trie.c l = leaf_walk_rcu(&iter->tnode, key); l 2738 net/ipv4/fib_trie.c if (l) { l 2739 net/ipv4/fib_trie.c iter->key = l->key; l 2745 net/ipv4/fib_trie.c return l; l 2783 net/ipv4/fib_trie.c struct key_vector *l = v; l 2793 net/ipv4/fib_trie.c prefix = htonl(l->key); l 2795 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { l 212 net/ipv4/ip_options.c int l = opt->optlen; l 215 net/ipv4/ip_options.c while (l > 0) { l 220 net/ipv4/ip_options.c l--; l 225 net/ipv4/ip_options.c if (optlen < 2 || optlen > l) l 229 net/ipv4/ip_options.c l -= optlen; l 263 net/ipv4/ip_options.c int optlen, l; l 272 net/ipv4/ip_options.c for (l = opt->optlen; l > 0; ) { l 275 net/ipv4/ip_options.c for (optptr++, l--; l > 0; optptr++, l--) { l 283 net/ipv4/ip_options.c l--; l 287 net/ipv4/ip_options.c if (unlikely(l < 2)) { l 292 
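The net/ipv4/ip_options.c lines above walk a type/length-encoded option area, treating 0 and 1 as end/no-op bytes and rejecting any option whose claimed length is below 2 or larger than the bytes remaining. A generic sketch of that defensive TLV walk (callback and names are illustrative):

  #include <stddef.h>

  #define OPT_END  0      /* end of option list, IPv4 convention */
  #define OPT_NOOP 1      /* single-byte padding                 */

  /* Walk a type/length option area; return 0 on success, -1 if malformed. */
  static int walk_options(const unsigned char *p, int l,
                          void (*cb)(unsigned char type,
                                     const unsigned char *data, int len))
  {
      while (l > 0) {
          int optlen;

          switch (*p) {
          case OPT_END:
              return 0;
          case OPT_NOOP:
              p++;
              l--;
              continue;
          }
          if (l < 2)
              return -1;          /* type byte with no length byte      */
          optlen = p[1];
          if (optlen < 2 || optlen > l)
              return -1;          /* claimed length overruns the area   */
          cb(p[0], p + 2, optlen - 2);
          p += optlen;
          l -= optlen;
      }
      return 0;
  }
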
net/ipv4/ip_options.c if (optlen < 2 || optlen > l) { l 463 net/ipv4/ip_options.c l -= optlen; l 41 net/ipv6/ip6_flowlabel.c #define FL_HASH(l) (ntohl(l)&FL_HASH_MASK) l 225 net/ipv6/ndisc.c int l; l 228 net/ipv6/ndisc.c l = nd_opt->nd_opt_len << 3; l 229 net/ipv6/ndisc.c if (opt_len < l || l == 0) l 278 net/ipv6/ndisc.c opt_len -= l; l 279 net/ipv6/ndisc.c nd_opt = ((void *)nd_opt) + l; l 642 net/iucv/af_iucv.c void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) l 644 net/iucv/af_iucv.c write_lock_bh(&l->lock); l 645 net/iucv/af_iucv.c sk_add_node(sk, &l->head); l 646 net/iucv/af_iucv.c write_unlock_bh(&l->lock); l 649 net/iucv/af_iucv.c void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) l 651 net/iucv/af_iucv.c write_lock_bh(&l->lock); l 653 net/iucv/af_iucv.c write_unlock_bh(&l->lock); l 61 net/llc/llc_proc.c loff_t l = *pos; l 64 net/llc/llc_proc.c return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN; l 15 net/mac80211/michael.c mctx->l ^= val; l 16 net/mac80211/michael.c mctx->r ^= rol32(mctx->l, 17); l 17 net/mac80211/michael.c mctx->l += mctx->r; l 18 net/mac80211/michael.c mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) | l 19 net/mac80211/michael.c ((mctx->l & 0x00ff00ff) << 8); l 20 net/mac80211/michael.c mctx->l += mctx->r; l 21 net/mac80211/michael.c mctx->r ^= rol32(mctx->l, 3); l 22 net/mac80211/michael.c mctx->l += mctx->r; l 23 net/mac80211/michael.c mctx->r ^= ror32(mctx->l, 2); l 24 net/mac80211/michael.c mctx->l += mctx->r; l 39 net/mac80211/michael.c mctx->l = get_unaligned_le32(key); l 81 net/mac80211/michael.c put_unaligned_le32(mctx.l, mic); l 16 net/mac80211/michael.h u32 l, r; l 3582 net/mac80211/util.c const struct ieee80211_cipher_scheme *l = local->hw.cipher_schemes; l 3588 net/mac80211/util.c if (l[i].cipher == cipher) { l 3589 net/mac80211/util.c cs = &l[i]; l 268 net/mpls/mpls_iptunnel.c int l; l 275 net/mpls/mpls_iptunnel.c for (l = 0; l < a_hdr->labels; l++) l 276 net/mpls/mpls_iptunnel.c if (a_hdr->label[l] != b_hdr->label[l]) l 479 net/netfilter/ipset/ip_set_hash_gen.h struct list_head *l, *lt; l 485 net/netfilter/ipset/ip_set_hash_gen.h list_for_each_safe(l, lt, &h->ad) { l 486 net/netfilter/ipset/ip_set_hash_gen.h list_del(l); l 487 net/netfilter/ipset/ip_set_hash_gen.h kfree(l); l 655 net/netfilter/ipset/ip_set_hash_gen.h struct list_head *l, *lt; l 790 net/netfilter/ipset/ip_set_hash_gen.h list_for_each_safe(l, lt, &h->ad) { l 791 net/netfilter/ipset/ip_set_hash_gen.h x = list_entry(l, struct mtype_resize_ad, list); l 797 net/netfilter/ipset/ip_set_hash_gen.h list_del(l); l 798 net/netfilter/ipset/ip_set_hash_gen.h kfree(l); l 85 net/netfilter/ipvs/ip_vs_conn.c spinlock_t l; l 94 net/netfilter/ipvs/ip_vs_conn.c spin_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 99 net/netfilter/ipvs/ip_vs_conn.c spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 1009 net/netfilter/ipvs/ip_vs_conn.c struct hlist_head *l; l 1024 net/netfilter/ipvs/ip_vs_conn.c iter->l = &ip_vs_conn_tab[idx]; l 1039 net/netfilter/ipvs/ip_vs_conn.c iter->l = NULL; l 1049 net/netfilter/ipvs/ip_vs_conn.c struct hlist_head *l = iter->l; l 1061 net/netfilter/ipvs/ip_vs_conn.c idx = l - ip_vs_conn_tab; l 1064 net/netfilter/ipvs/ip_vs_conn.c iter->l = &ip_vs_conn_tab[idx]; l 1069 net/netfilter/ipvs/ip_vs_conn.c iter->l = NULL; l 1428 net/netfilter/ipvs/ip_vs_conn.c spin_lock_init(&__ip_vs_conntbl_lock_array[idx].l); l 108 net/netfilter/ipvs/ip_vs_mh.c struct ip_vs_mh_lookup *l; l 111 net/netfilter/ipvs/ip_vs_mh.c l = &s->lookup[0]; l 113 
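The net/mac80211/michael.c lines spell out one round of the Michael MIC block function over the running (l, r) state. A standalone version with the rotate helpers written out; this is only the per-word mixing step, not key setup, padding or finalisation:

  #include <stdint.h>

  struct michael_ctx { uint32_t l, r; };

  static uint32_t rol32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }
  static uint32_t ror32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }

  /* Mix one 32-bit message word into the running MIC state. */
  static void michael_block(struct michael_ctx *mctx, uint32_t val)
  {
      mctx->l ^= val;
      mctx->r ^= rol32(mctx->l, 17);
      mctx->l += mctx->r;
      mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) |
                 ((mctx->l & 0x00ff00ff) << 8);   /* swap adjacent bytes */
      mctx->l += mctx->r;
      mctx->r ^= rol32(mctx->l, 3);
      mctx->l += mctx->r;
      mctx->r ^= ror32(mctx->l, 2);
      mctx->l += mctx->r;
  }
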
net/netfilter/ipvs/ip_vs_mh.c dest = rcu_dereference_protected(l->dest, 1); l 116 net/netfilter/ipvs/ip_vs_mh.c RCU_INIT_POINTER(l->dest, NULL); l 118 net/netfilter/ipvs/ip_vs_mh.c l++; l 190 net/netfilter/nf_conntrack_h323_asn1.c unsigned int v, l; l 193 net/netfilter/nf_conntrack_h323_asn1.c l = b + bs->bit; l 195 net/netfilter/nf_conntrack_h323_asn1.c if (l < 8) { l 196 net/netfilter/nf_conntrack_h323_asn1.c v >>= 8 - l; l 197 net/netfilter/nf_conntrack_h323_asn1.c bs->bit = l; l 198 net/netfilter/nf_conntrack_h323_asn1.c } else if (l == 8) { l 205 net/netfilter/nf_conntrack_h323_asn1.c v >>= 16 - l; l 206 net/netfilter/nf_conntrack_h323_asn1.c bs->bit = l - 8; l 215 net/netfilter/nf_conntrack_h323_asn1.c unsigned int v, l, shift, bytes; l 220 net/netfilter/nf_conntrack_h323_asn1.c l = bs->bit + b; l 222 net/netfilter/nf_conntrack_h323_asn1.c if (l < 8) { l 224 net/netfilter/nf_conntrack_h323_asn1.c bs->bit = l; l 225 net/netfilter/nf_conntrack_h323_asn1.c } else if (l == 8) { l 229 net/netfilter/nf_conntrack_h323_asn1.c for (bytes = l >> 3, shift = 24, v = 0; bytes; l 233 net/netfilter/nf_conntrack_h323_asn1.c if (l < 32) { l 236 net/netfilter/nf_conntrack_h323_asn1.c } else if (l > 32) { l 241 net/netfilter/nf_conntrack_h323_asn1.c bs->bit = l & 0x7; l 606 net/netfilter/xt_hashlimit.c static inline __be32 maskl(__be32 a, unsigned int l) l 608 net/netfilter/xt_hashlimit.c return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0; l 201 net/nfc/llcp.h void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s); l 202 net/nfc/llcp.h void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s); l 23 net/nfc/llcp_core.c void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk) l 25 net/nfc/llcp_core.c write_lock(&l->lock); l 26 net/nfc/llcp_core.c sk_add_node(sk, &l->head); l 27 net/nfc/llcp_core.c write_unlock(&l->lock); l 30 net/nfc/llcp_core.c void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk) l 32 net/nfc/llcp_core.c write_lock(&l->lock); l 34 net/nfc/llcp_core.c write_unlock(&l->lock); l 22 net/nfc/rawsock.c static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk) l 24 net/nfc/rawsock.c write_lock(&l->lock); l 25 net/nfc/rawsock.c sk_add_node(sk, &l->head); l 26 net/nfc/rawsock.c write_unlock(&l->lock); l 29 net/nfc/rawsock.c static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk) l 31 net/nfc/rawsock.c write_lock(&l->lock); l 33 net/nfc/rawsock.c write_unlock(&l->lock); l 242 net/rose/rose_subr.c unsigned char l, lg, n = 0; l 276 net/rose/rose_subr.c l = p[1]; l 277 net/rose/rose_subr.c if (len < 2 + l) l 281 net/rose/rose_subr.c if (l < AX25_ADDR_LEN) l 289 net/rose/rose_subr.c if (l < AX25_ADDR_LEN) l 296 net/rose/rose_subr.c if (l < AX25_ADDR_LEN) l 301 net/rose/rose_subr.c if (l < 1 + ROSE_ADDR_LEN) l 306 net/rose/rose_subr.c if (l % AX25_ADDR_LEN) l 311 net/rose/rose_subr.c for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { l 323 net/rose/rose_subr.c p += l + 2; l 324 net/rose/rose_subr.c n += l + 2; l 325 net/rose/rose_subr.c len -= l + 2; l 335 net/rose/rose_subr.c unsigned char l, n = 0; l 367 net/rose/rose_subr.c l = p[1]; l 370 net/rose/rose_subr.c if (l < 10 || l > 20) l 375 net/rose/rose_subr.c memcpy(callsign, p + 12, l - 10); l 376 net/rose/rose_subr.c callsign[l - 10] = '\0'; l 381 net/rose/rose_subr.c memcpy(callsign, p + 12, l - 10); l 382 net/rose/rose_subr.c callsign[l - 10] = '\0'; l 385 net/rose/rose_subr.c p += l + 2; l 386 net/rose/rose_subr.c n += l + 2; l 387 
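xt_hashlimit's maskl() above converts a prefix length into a mask while special-casing length 0, because shifting a 32-bit value by 32 is undefined in C. A host-order-only sketch of the same idea:

  #include <stdint.h>

  /* Host-order /l netmask; l == 0 is special-cased to avoid a 32-bit shift
   * by 32, which is undefined behaviour. */
  static uint32_t prefix_mask(unsigned int l)
  {
      return l ? ~0u << (32 - l) : 0;
  }

  /* Keep only the top l bits of a host-order IPv4 address. */
  static uint32_t mask_addr(uint32_t addr, unsigned int l)
  {
      return addr & prefix_mask(l);
  }
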
net/rose/rose_subr.c len -= l + 2; l 1133 net/rxrpc/key.c #define ENCODE_DATA(l, s) \ l 1135 net/rxrpc/key.c u32 _l = (l); \ l 1136 net/rxrpc/key.c ENCODE(l); \ l 445 net/rxrpc/recvmsg.c struct list_head *l; l 501 net/rxrpc/recvmsg.c l = rx->recvmsg_q.next; l 502 net/rxrpc/recvmsg.c call = list_entry(l, struct rxrpc_call, recvmsg_link); l 1382 net/sched/sch_cake.c u32 l = m + m + 1; l 1383 net/sched/sch_cake.c u32 r = l + 1; l 1385 net/sched/sch_cake.c if (l < a) { l 1386 net/sched/sch_cake.c u32 lb = cake_heap_get_backlog(q, l); l 1389 net/sched/sch_cake.c m = l; l 503 net/smc/smc_core.c struct smc_link_group *lgr, *l; l 506 net/smc/smc_core.c list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { l 518 net/smc/smc_core.c struct smc_link_group *lgr, *l; l 523 net/smc/smc_core.c list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { l 534 net/smc/smc_core.c list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { l 644 net/sunrpc/auth_gss/svcauth_gss.c int l; l 649 net/sunrpc/auth_gss/svcauth_gss.c l = round_up_to_quad(o->len); l 650 net/sunrpc/auth_gss/svcauth_gss.c if (argv->iov_len < l) l 653 net/sunrpc/auth_gss/svcauth_gss.c argv->iov_base += l; l 654 net/sunrpc/auth_gss/svcauth_gss.c argv->iov_len -= l; l 1266 net/sunrpc/cache.c int h, l; l 1272 net/sunrpc/cache.c l = hex_to_bin(bp[1]); l 1273 net/sunrpc/cache.c if (l < 0) l 1276 net/sunrpc/cache.c *dest++ = (h << 4) | l; l 1412 net/sunrpc/xdr.c unsigned int l = min(avail_page, l 1427 net/sunrpc/xdr.c memcpy(c, elem + copied, l); l 1428 net/sunrpc/xdr.c copied += l; l 1432 net/sunrpc/xdr.c memcpy(elem + copied, c, l); l 1433 net/sunrpc/xdr.c copied += l; l 1441 net/sunrpc/xdr.c avail_page -= l; l 1442 net/sunrpc/xdr.c c += l; l 1452 net/sunrpc/xdr.c unsigned int l = min(avail_page, l 1467 net/sunrpc/xdr.c memcpy(c, elem + copied, l); l 1468 net/sunrpc/xdr.c copied += l; l 1472 net/sunrpc/xdr.c memcpy(elem + copied, c, l); l 1473 net/sunrpc/xdr.c copied += l; l 1499 net/sunrpc/xdr.c unsigned int l = desc->elem_size - copied; l 1502 net/sunrpc/xdr.c memcpy(c, elem + copied, l); l 1504 net/sunrpc/xdr.c memcpy(elem + copied, c, l); l 1509 net/sunrpc/xdr.c todo -= l; l 1510 net/sunrpc/xdr.c c += l; l 255 net/tipc/bcast.c struct tipc_link *l = tipc_bc_sndlink(net); l 261 net/tipc/bcast.c if (tipc_link_bc_peers(l)) l 262 net/tipc/bcast.c rc = tipc_link_xmit(l, pkts, &xmitq); l 429 net/tipc/bcast.c int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb) l 438 net/tipc/bcast.c if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) { l 445 net/tipc/bcast.c rc = tipc_link_bc_nack_rcv(l, skb, &xmitq); l 447 net/tipc/bcast.c rc = tipc_link_rcv(l, skb, NULL); l 463 net/tipc/bcast.c void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, l 477 net/tipc/bcast.c tipc_link_bc_ack_rcv(l, acked, &xmitq); l 491 net/tipc/bcast.c int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l, l 502 net/tipc/bcast.c tipc_link_bc_init_rcv(l, hdr); l 504 net/tipc/bcast.c tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq); l 505 net/tipc/bcast.c rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq); l 560 net/tipc/bcast.c struct tipc_link *l = tipc_bc_sndlink(net); l 562 net/tipc/bcast.c if (!l) l 566 net/tipc/bcast.c tipc_link_reset_stats(l); l 573 net/tipc/bcast.c struct tipc_link *l = tipc_bc_sndlink(net); l 575 net/tipc/bcast.c if (!l) l 582 net/tipc/bcast.c tipc_link_set_queue_limits(l, limit); l 681 net/tipc/bcast.c struct tipc_link *l = NULL; l 696 net/tipc/bcast.c &l)) l 698 net/tipc/bcast.c bb->link = l; l 699 
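sch_cake's heap code above indexes the children of slot m as l = m + m + 1 and r = l + 1 and promotes whichever child compares larger; kernel/sched/cpudeadline.c earlier in the listing uses the same shape. A generic array max-heap sift-down with that indexing, comparing plain integers as a stand-in for backlog or deadline values:

  #include <stddef.h>

  static void swap_u32(unsigned int *a, unsigned int *b)
  {
      unsigned int t = *a; *a = *b; *b = t;
  }

  /* Restore the max-heap property below slot m in heap[0..n). */
  static void sift_down(unsigned int *heap, size_t n, size_t m)
  {
      for (;;) {
          size_t l = m + m + 1;   /* left child  */
          size_t r = l + 1;       /* right child */
          size_t largest = m;

          if (l < n && heap[l] > heap[largest])
              largest = l;
          if (r < n && heap[r] > heap[largest])
              largest = r;
          if (largest == m)
              break;
          swap_u32(&heap[m], &heap[largest]);
          m = largest;
      }
  }
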
net/tipc/bcast.c tn->bcl = l; l 705 net/tipc/bcast.c kfree(l); l 82 net/tipc/bcast.h void tipc_bcast_add_peer(struct net *net, struct tipc_link *l, l 92 net/tipc/bcast.h int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); l 93 net/tipc/bcast.h void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, l 95 net/tipc/bcast.h int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l, l 230 net/tipc/link.c static int link_is_up(struct tipc_link *l) l 232 net/tipc/link.c return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); l 235 net/tipc/link.c static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, l 237 net/tipc/link.c static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, l 241 net/tipc/link.c static void link_print(struct tipc_link *l, const char *str); l 242 net/tipc/link.c static int tipc_link_build_nack_msg(struct tipc_link *l, l 244 net/tipc/link.c static void tipc_link_build_bc_init_msg(struct tipc_link *l, l 246 net/tipc/link.c static bool tipc_link_release_pkts(struct tipc_link *l, u16 to); l 247 net/tipc/link.c static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data); l 248 net/tipc/link.c static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap, l 255 net/tipc/link.c bool tipc_link_is_up(struct tipc_link *l) l 257 net/tipc/link.c return link_is_up(l); l 260 net/tipc/link.c bool tipc_link_peer_is_down(struct tipc_link *l) l 262 net/tipc/link.c return l->state == LINK_PEER_RESET; l 265 net/tipc/link.c bool tipc_link_is_reset(struct tipc_link *l) l 267 net/tipc/link.c return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING); l 270 net/tipc/link.c bool tipc_link_is_establishing(struct tipc_link *l) l 272 net/tipc/link.c return l->state == LINK_ESTABLISHING; l 275 net/tipc/link.c bool tipc_link_is_synching(struct tipc_link *l) l 277 net/tipc/link.c return l->state == LINK_SYNCHING; l 280 net/tipc/link.c bool tipc_link_is_failingover(struct tipc_link *l) l 282 net/tipc/link.c return l->state == LINK_FAILINGOVER; l 285 net/tipc/link.c bool tipc_link_is_blocked(struct tipc_link *l) l 287 net/tipc/link.c return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER); l 290 net/tipc/link.c static bool link_is_bc_sndlink(struct tipc_link *l) l 292 net/tipc/link.c return !l->bc_sndlink; l 295 net/tipc/link.c static bool link_is_bc_rcvlink(struct tipc_link *l) l 297 net/tipc/link.c return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l)); l 300 net/tipc/link.c void tipc_link_set_active(struct tipc_link *l, bool active) l 302 net/tipc/link.c l->active = active; l 305 net/tipc/link.c u32 tipc_link_id(struct tipc_link *l) l 307 net/tipc/link.c return l->peer_bearer_id << 16 | l->bearer_id; l 310 net/tipc/link.c int tipc_link_window(struct tipc_link *l) l 312 net/tipc/link.c return l->window; l 315 net/tipc/link.c int tipc_link_prio(struct tipc_link *l) l 317 net/tipc/link.c return l->priority; l 320 net/tipc/link.c unsigned long tipc_link_tolerance(struct tipc_link *l) l 322 net/tipc/link.c return l->tolerance; l 325 net/tipc/link.c struct sk_buff_head *tipc_link_inputq(struct tipc_link *l) l 327 net/tipc/link.c return l->inputq; l 330 net/tipc/link.c char tipc_link_plane(struct tipc_link *l) l 332 net/tipc/link.c return l->net_plane; l 335 net/tipc/link.c void tipc_link_update_caps(struct tipc_link *l, u16 capabilities) l 337 net/tipc/link.c l->peer_caps = capabilities; l 373 net/tipc/link.c int tipc_link_bc_peers(struct tipc_link *l) l 375 net/tipc/link.c return 
l->ackers; l 378 net/tipc/link.c static u16 link_bc_rcv_gap(struct tipc_link *l) l 380 net/tipc/link.c struct sk_buff *skb = skb_peek(&l->deferdq); l 383 net/tipc/link.c if (more(l->snd_nxt, l->rcv_nxt)) l 384 net/tipc/link.c gap = l->snd_nxt - l->rcv_nxt; l 386 net/tipc/link.c gap = buf_seqno(skb) - l->rcv_nxt; l 390 net/tipc/link.c void tipc_link_set_mtu(struct tipc_link *l, int mtu) l 392 net/tipc/link.c l->mtu = mtu; l 395 net/tipc/link.c int tipc_link_mtu(struct tipc_link *l) l 397 net/tipc/link.c return l->mtu; l 400 net/tipc/link.c u16 tipc_link_rcv_nxt(struct tipc_link *l) l 402 net/tipc/link.c return l->rcv_nxt; l 405 net/tipc/link.c u16 tipc_link_acked(struct tipc_link *l) l 407 net/tipc/link.c return l->acked; l 410 net/tipc/link.c char *tipc_link_name(struct tipc_link *l) l 412 net/tipc/link.c return l->name; l 415 net/tipc/link.c u32 tipc_link_state(struct tipc_link *l) l 417 net/tipc/link.c return l->state; l 454 net/tipc/link.c struct tipc_link *l; l 456 net/tipc/link.c l = kzalloc(sizeof(*l), GFP_ATOMIC); l 457 net/tipc/link.c if (!l) l 459 net/tipc/link.c *link = l; l 460 net/tipc/link.c l->session = session; l 472 net/tipc/link.c snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown", l 475 net/tipc/link.c strcpy(l->if_name, if_name); l 476 net/tipc/link.c l->addr = peer; l 477 net/tipc/link.c l->peer_caps = peer_caps; l 478 net/tipc/link.c l->net = net; l 479 net/tipc/link.c l->in_session = false; l 480 net/tipc/link.c l->bearer_id = bearer_id; l 481 net/tipc/link.c l->tolerance = tolerance; l 484 net/tipc/link.c l->net_plane = net_plane; l 485 net/tipc/link.c l->advertised_mtu = mtu; l 486 net/tipc/link.c l->mtu = mtu; l 487 net/tipc/link.c l->priority = priority; l 488 net/tipc/link.c tipc_link_set_queue_limits(l, window); l 489 net/tipc/link.c l->ackers = 1; l 490 net/tipc/link.c l->bc_sndlink = bc_sndlink; l 491 net/tipc/link.c l->bc_rcvlink = bc_rcvlink; l 492 net/tipc/link.c l->inputq = inputq; l 493 net/tipc/link.c l->namedq = namedq; l 494 net/tipc/link.c l->state = LINK_RESETTING; l 495 net/tipc/link.c __skb_queue_head_init(&l->transmq); l 496 net/tipc/link.c __skb_queue_head_init(&l->backlogq); l 497 net/tipc/link.c __skb_queue_head_init(&l->deferdq); l 498 net/tipc/link.c __skb_queue_head_init(&l->failover_deferdq); l 499 net/tipc/link.c skb_queue_head_init(&l->wakeupq); l 500 net/tipc/link.c skb_queue_head_init(l->inputq); l 522 net/tipc/link.c struct tipc_link *l; l 529 net/tipc/link.c l = *link; l 530 net/tipc/link.c strcpy(l->name, tipc_bclink_name); l 531 net/tipc/link.c trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!"); l 532 net/tipc/link.c tipc_link_reset(l); l 533 net/tipc/link.c l->state = LINK_RESET; l 534 net/tipc/link.c l->ackers = 0; l 535 net/tipc/link.c l->bc_rcvlink = l; l 538 net/tipc/link.c if (link_is_bc_sndlink(l)) l 539 net/tipc/link.c l->state = LINK_ESTABLISHED; l 542 net/tipc/link.c if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST)) l 553 net/tipc/link.c int tipc_link_fsm_evt(struct tipc_link *l, int evt) l 556 net/tipc/link.c int old_state = l->state; l 558 net/tipc/link.c switch (l->state) { l 562 net/tipc/link.c l->state = LINK_PEER_RESET; l 565 net/tipc/link.c l->state = LINK_RESET; l 580 net/tipc/link.c l->state = LINK_ESTABLISHING; l 583 net/tipc/link.c l->state = LINK_FAILINGOVER; l 598 net/tipc/link.c l->state = LINK_ESTABLISHING; l 615 net/tipc/link.c l->state = LINK_RESET; l 632 net/tipc/link.c l->state = LINK_ESTABLISHED; l 635 net/tipc/link.c l->state = LINK_FAILINGOVER; l 638 net/tipc/link.c l->state = 
LINK_RESET; l 653 net/tipc/link.c l->state = LINK_PEER_RESET; l 657 net/tipc/link.c l->state = LINK_RESETTING; l 661 net/tipc/link.c l->state = LINK_RESET; l 667 net/tipc/link.c l->state = LINK_SYNCHING; l 678 net/tipc/link.c l->state = LINK_PEER_RESET; l 682 net/tipc/link.c l->state = LINK_RESETTING; l 686 net/tipc/link.c l->state = LINK_RESET; l 692 net/tipc/link.c l->state = LINK_ESTABLISHED; l 701 net/tipc/link.c pr_err("Unknown FSM state %x in %s\n", l->state, l->name); l 703 net/tipc/link.c trace_tipc_link_fsm(l->name, old_state, l->state, evt); l 707 net/tipc/link.c evt, l->state, l->name); l 708 net/tipc/link.c trace_tipc_link_fsm(l->name, old_state, l->state, evt); l 714 net/tipc/link.c static void link_profile_stats(struct tipc_link *l) l 721 net/tipc/link.c l->stats.accu_queue_sz += skb_queue_len(&l->transmq); l 722 net/tipc/link.c l->stats.queue_sz_counts++; l 724 net/tipc/link.c skb = skb_peek(&l->transmq); l 735 net/tipc/link.c l->stats.msg_lengths_total += length; l 736 net/tipc/link.c l->stats.msg_length_counts++; l 738 net/tipc/link.c l->stats.msg_length_profile[0]++; l 740 net/tipc/link.c l->stats.msg_length_profile[1]++; l 742 net/tipc/link.c l->stats.msg_length_profile[2]++; l 744 net/tipc/link.c l->stats.msg_length_profile[3]++; l 746 net/tipc/link.c l->stats.msg_length_profile[4]++; l 748 net/tipc/link.c l->stats.msg_length_profile[5]++; l 750 net/tipc/link.c l->stats.msg_length_profile[6]++; l 760 net/tipc/link.c bool tipc_link_too_silent(struct tipc_link *l) l 762 net/tipc/link.c return (l->silent_intv_cnt + 2 > l->abort_limit); l 767 net/tipc/link.c int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) l 774 net/tipc/link.c u16 bc_snt = l->bc_sndlink->snd_nxt - 1; l 775 net/tipc/link.c u16 bc_acked = l->bc_rcvlink->acked; l 776 net/tipc/link.c struct tipc_mon_state *mstate = &l->mon_state; l 778 net/tipc/link.c trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " "); l 779 net/tipc/link.c trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " "); l 780 net/tipc/link.c switch (l->state) { l 784 net/tipc/link.c link_profile_stats(l); l 785 net/tipc/link.c tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); l 786 net/tipc/link.c if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) l 787 net/tipc/link.c return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); l 789 net/tipc/link.c state |= l->bc_rcvlink->rcv_unacked; l 790 net/tipc/link.c state |= l->rcv_unacked; l 791 net/tipc/link.c state |= !skb_queue_empty(&l->transmq); l 792 net/tipc/link.c state |= !skb_queue_empty(&l->deferdq); l 794 net/tipc/link.c probe |= l->silent_intv_cnt; l 796 net/tipc/link.c l->silent_intv_cnt++; l 799 net/tipc/link.c setup = l->rst_cnt++ <= 4; l 800 net/tipc/link.c setup |= !(l->rst_cnt % 16); l 816 net/tipc/link.c tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq); l 827 net/tipc/link.c static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr) l 829 net/tipc/link.c u32 dnode = tipc_own_addr(l->net); l 835 net/tipc/link.c dnode, l->addr, dport, 0, 0); l 840 net/tipc/link.c skb_queue_tail(&l->wakeupq, skb); l 841 net/tipc/link.c l->stats.link_congs++; l 842 net/tipc/link.c trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!"); l 852 net/tipc/link.c static void link_prepare_wakeup(struct tipc_link *l) l 854 net/tipc/link.c struct sk_buff_head *wakeupq = &l->wakeupq; l 855 net/tipc/link.c struct sk_buff_head *inputq = l->inputq; l 864 net/tipc/link.c avail[imp] = l->backlog[imp].limit - l->backlog[imp].len; l 881 net/tipc/link.c void 
tipc_link_reset(struct tipc_link *l) l 888 net/tipc/link.c l->in_session = false; l 890 net/tipc/link.c l->peer_session--; l 891 net/tipc/link.c l->session++; l 892 net/tipc/link.c l->mtu = l->advertised_mtu; l 894 net/tipc/link.c spin_lock_bh(&l->wakeupq.lock); l 895 net/tipc/link.c skb_queue_splice_init(&l->wakeupq, &list); l 896 net/tipc/link.c spin_unlock_bh(&l->wakeupq.lock); l 898 net/tipc/link.c spin_lock_bh(&l->inputq->lock); l 899 net/tipc/link.c skb_queue_splice_init(&list, l->inputq); l 900 net/tipc/link.c spin_unlock_bh(&l->inputq->lock); l 902 net/tipc/link.c __skb_queue_purge(&l->transmq); l 903 net/tipc/link.c __skb_queue_purge(&l->deferdq); l 904 net/tipc/link.c __skb_queue_purge(&l->backlogq); l 905 net/tipc/link.c __skb_queue_purge(&l->failover_deferdq); l 907 net/tipc/link.c l->backlog[imp].len = 0; l 908 net/tipc/link.c l->backlog[imp].target_bskb = NULL; l 910 net/tipc/link.c kfree_skb(l->reasm_buf); l 911 net/tipc/link.c kfree_skb(l->reasm_tnlmsg); l 912 net/tipc/link.c kfree_skb(l->failover_reasm_skb); l 913 net/tipc/link.c l->reasm_buf = NULL; l 914 net/tipc/link.c l->reasm_tnlmsg = NULL; l 915 net/tipc/link.c l->failover_reasm_skb = NULL; l 916 net/tipc/link.c l->rcv_unacked = 0; l 917 net/tipc/link.c l->snd_nxt = 1; l 918 net/tipc/link.c l->rcv_nxt = 1; l 919 net/tipc/link.c l->snd_nxt_state = 1; l 920 net/tipc/link.c l->rcv_nxt_state = 1; l 921 net/tipc/link.c l->acked = 0; l 922 net/tipc/link.c l->silent_intv_cnt = 0; l 923 net/tipc/link.c l->rst_cnt = 0; l 924 net/tipc/link.c l->bc_peer_is_up = false; l 925 net/tipc/link.c memset(&l->mon_state, 0, sizeof(l->mon_state)); l 926 net/tipc/link.c tipc_link_reset_stats(l); l 939 net/tipc/link.c int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, l 943 net/tipc/link.c unsigned int maxwin = l->window; l 945 net/tipc/link.c unsigned int mtu = l->mtu; l 946 net/tipc/link.c u16 ack = l->rcv_nxt - 1; l 947 net/tipc/link.c u16 seqno = l->snd_nxt; l 948 net/tipc/link.c u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; l 949 net/tipc/link.c struct sk_buff_head *transmq = &l->transmq; l 950 net/tipc/link.c struct sk_buff_head *backlogq = &l->backlogq; l 964 net/tipc/link.c if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) { l 966 net/tipc/link.c pr_warn("%s<%s>, link overflow", link_rst_msg, l->name); l 969 net/tipc/link.c rc = link_schedule_user(l, hdr); l 973 net/tipc/link.c l->stats.sent_fragmented++; l 974 net/tipc/link.c l->stats.sent_fragments += pkt_cnt; l 994 net/tipc/link.c if (link_is_bc_sndlink(l)) l 997 net/tipc/link.c TIPC_SKB_CB(skb)->ackers = l->ackers; l 998 net/tipc/link.c l->rcv_unacked = 0; l 999 net/tipc/link.c l->stats.sent_pkts++; l 1003 net/tipc/link.c tskb = &l->backlog[imp].target_bskb; l 1006 net/tipc/link.c l->stats.sent_bundled++; l 1009 net/tipc/link.c if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) { l 1012 net/tipc/link.c l->backlog[imp].len++; l 1013 net/tipc/link.c l->stats.sent_bundled++; l 1014 net/tipc/link.c l->stats.sent_bundles++; l 1017 net/tipc/link.c l->backlog[imp].target_bskb = NULL; l 1018 net/tipc/link.c l->backlog[imp].len += skb_queue_len(list); l 1021 net/tipc/link.c l->snd_nxt = seqno; l 1025 net/tipc/link.c static void tipc_link_advance_backlog(struct tipc_link *l, l 1030 net/tipc/link.c u16 seqno = l->snd_nxt; l 1031 net/tipc/link.c u16 ack = l->rcv_nxt - 1; l 1032 net/tipc/link.c u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; l 1035 net/tipc/link.c while (skb_queue_len(&l->transmq) < l->window) { l 1036 net/tipc/link.c skb = skb_peek(&l->backlogq); l 1042 
net/tipc/link.c __skb_dequeue(&l->backlogq); l 1045 net/tipc/link.c l->backlog[imp].len--; l 1046 net/tipc/link.c if (unlikely(skb == l->backlog[imp].target_bskb)) l 1047 net/tipc/link.c l->backlog[imp].target_bskb = NULL; l 1048 net/tipc/link.c __skb_queue_tail(&l->transmq, skb); l 1050 net/tipc/link.c if (link_is_bc_sndlink(l)) l 1054 net/tipc/link.c TIPC_SKB_CB(skb)->ackers = l->ackers; l 1058 net/tipc/link.c l->rcv_unacked = 0; l 1059 net/tipc/link.c l->stats.sent_pkts++; l 1062 net/tipc/link.c l->snd_nxt = seqno; l 1074 net/tipc/link.c static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, l 1077 net/tipc/link.c struct sk_buff *skb = skb_peek(&l->transmq); l 1091 net/tipc/link.c if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr))) l 1094 net/tipc/link.c pr_warn("Retransmission failure on link <%s>\n", l->name); l 1095 net/tipc/link.c link_print(l, "State of link "); l 1104 net/tipc/link.c trace_tipc_list_dump(&l->transmq, true, "retrans failure!"); l 1105 net/tipc/link.c trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!"); l 1108 net/tipc/link.c if (link_is_bc_sndlink(l)) { l 1112 net/tipc/link.c *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); l 1125 net/tipc/link.c static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r, l 1128 net/tipc/link.c struct sk_buff *_skb, *skb = skb_peek(&l->transmq); l 1129 net/tipc/link.c u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; l 1130 net/tipc/link.c u16 ack = l->rcv_nxt - 1; l 1139 net/tipc/link.c trace_tipc_link_retrans(r, from, to, &l->transmq); l 1141 net/tipc/link.c if (link_retransmit_failure(l, r, &rc)) l 1144 net/tipc/link.c skb_queue_walk(&l->transmq, skb) { l 1162 net/tipc/link.c l->stats.retransmitted++; l 1176 net/tipc/link.c static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, l 1179 net/tipc/link.c struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq; l 1199 net/tipc/link.c l->bc_rcvlink->state = LINK_ESTABLISHED; l 1200 net/tipc/link.c skb_queue_tail(l->namedq, skb); l 1218 net/tipc/link.c static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb, l 1230 net/tipc/link.c l->stats.recv_bundles++; l 1231 net/tipc/link.c l->stats.recv_bundled += msg_msgcnt(hdr); l 1233 net/tipc/link.c tipc_data_input(l, iskb, &tmpq); l 1237 net/tipc/link.c l->stats.recv_fragments++; l 1239 net/tipc/link.c l->stats.recv_fragmented++; l 1240 net/tipc/link.c tipc_data_input(l, skb, inputq); l 1241 net/tipc/link.c } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) { l 1243 net/tipc/link.c return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); l 1247 net/tipc/link.c tipc_bcast_lock(l->net); l 1248 net/tipc/link.c tipc_link_bc_init_rcv(l->bc_rcvlink, hdr); l 1249 net/tipc/link.c tipc_bcast_unlock(l->net); l 1263 net/tipc/link.c static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb, l 1266 net/tipc/link.c struct sk_buff **reasm_skb = &l->failover_reasm_skb; l 1267 net/tipc/link.c struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg; l 1268 net/tipc/link.c struct sk_buff_head *fdefq = &l->failover_deferdq; l 1299 net/tipc/link.c if (*reasm_tnlmsg || link_is_bc_rcvlink(l)) l 1302 net/tipc/link.c return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); l 1309 net/tipc/link.c if (unlikely(less(seqno, l->drop_point))) { l 1313 net/tipc/link.c if (unlikely(seqno != l->drop_point)) { l 1318 net/tipc/link.c l->drop_point++; l 1319 net/tipc/link.c if (!tipc_data_input(l, iskb, inputq)) l 1320 net/tipc/link.c rc |= tipc_link_input(l, iskb, inputq, reasm_skb); l 1323 net/tipc/link.c } while ((iskb = 
__tipc_skb_dequeue(fdefq, l->drop_point))); l 1328 net/tipc/link.c static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked) l 1333 net/tipc/link.c skb_queue_walk_safe(&l->transmq, skb, tmp) { l 1336 net/tipc/link.c __skb_unlink(skb, &l->transmq); l 1349 net/tipc/link.c static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data) l 1351 net/tipc/link.c struct sk_buff *skb = skb_peek(&l->deferdq); l 1360 net/tipc/link.c skb_queue_walk(&l->deferdq, skb) { l 1400 net/tipc/link.c static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap, l 1406 net/tipc/link.c u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; l 1407 net/tipc/link.c u16 ack = l->rcv_nxt - 1; l 1412 net/tipc/link.c skb_queue_walk_safe(&l->transmq, skb, tmp) { l 1418 net/tipc/link.c __skb_unlink(skb, &l->transmq); l 1422 net/tipc/link.c if (!passed && link_retransmit_failure(l, l, &rc)) l 1439 net/tipc/link.c l->stats.retransmitted++; l 1463 net/tipc/link.c int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq) l 1465 net/tipc/link.c if (!l) l 1469 net/tipc/link.c if (link_is_bc_rcvlink(l)) { l 1470 net/tipc/link.c if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf) l 1472 net/tipc/link.c l->rcv_unacked = 0; l 1475 net/tipc/link.c l->snd_nxt = l->rcv_nxt; l 1480 net/tipc/link.c l->rcv_unacked = 0; l 1481 net/tipc/link.c l->stats.sent_acks++; l 1482 net/tipc/link.c tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq); l 1488 net/tipc/link.c void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq) l 1493 net/tipc/link.c if (l->state == LINK_ESTABLISHING) l 1496 net/tipc/link.c tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq); l 1500 net/tipc/link.c if (skb && (l->state == LINK_RESET)) l 1508 net/tipc/link.c static int tipc_link_build_nack_msg(struct tipc_link *l, l 1511 net/tipc/link.c u32 def_cnt = ++l->stats.deferred_recv; l 1512 net/tipc/link.c u32 defq_len = skb_queue_len(&l->deferdq); l 1515 net/tipc/link.c if (link_is_bc_rcvlink(l)) { l 1517 net/tipc/link.c match2 = tipc_own_addr(l->net) & 0xf; l 1524 net/tipc/link.c tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq); l 1533 net/tipc/link.c int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, l 1536 net/tipc/link.c struct sk_buff_head *defq = &l->deferdq; l 1543 net/tipc/link.c return tipc_link_proto_rcv(l, skb, xmitq); l 1546 net/tipc/link.c l->silent_intv_cnt = 0; l 1551 net/tipc/link.c rcv_nxt = l->rcv_nxt; l 1554 net/tipc/link.c if (unlikely(!link_is_up(l))) { l 1555 net/tipc/link.c if (l->state == LINK_ESTABLISHING) l 1562 net/tipc/link.c l->stats.duplicates++; l 1567 net/tipc/link.c if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) { l 1568 net/tipc/link.c tipc_link_advance_backlog(l, xmitq); l 1569 net/tipc/link.c if (unlikely(!skb_queue_empty(&l->wakeupq))) l 1570 net/tipc/link.c link_prepare_wakeup(l); l 1576 net/tipc/link.c rc |= tipc_link_build_nack_msg(l, xmitq); l 1581 net/tipc/link.c l->rcv_nxt++; l 1582 net/tipc/link.c l->stats.recv_pkts++; l 1585 net/tipc/link.c rc |= tipc_link_tnl_rcv(l, skb, l->inputq); l 1586 net/tipc/link.c else if (!tipc_data_input(l, skb, l->inputq)) l 1587 net/tipc/link.c rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf); l 1588 net/tipc/link.c if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) l 1589 net/tipc/link.c rc |= tipc_link_build_state_msg(l, xmitq); l 1592 net/tipc/link.c } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt))); l 1600 net/tipc/link.c static void tipc_link_build_proto_msg(struct tipc_link 
*l, int mtyp, bool probe, l 1605 net/tipc/link.c struct tipc_link *bcl = l->bc_rcvlink; l 1608 net/tipc/link.c struct sk_buff_head *dfq = &l->deferdq; l 1610 net/tipc/link.c struct tipc_mon_state *mstate = &l->mon_state; l 1616 net/tipc/link.c if (tipc_link_is_blocked(l)) l 1619 net/tipc/link.c if (!tipc_link_is_up(l) && (mtyp == STATE_MSG)) l 1623 net/tipc/link.c rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; l 1627 net/tipc/link.c l->addr, tipc_own_addr(l->net), 0, 0, 0); l 1633 net/tipc/link.c msg_set_session(hdr, l->session); l 1634 net/tipc/link.c msg_set_bearer_id(hdr, l->bearer_id); l 1635 net/tipc/link.c msg_set_net_plane(hdr, l->net_plane); l 1636 net/tipc/link.c msg_set_next_sent(hdr, l->snd_nxt); l 1637 net/tipc/link.c msg_set_ack(hdr, l->rcv_nxt - 1); l 1640 net/tipc/link.c msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); l 1645 net/tipc/link.c msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2); l 1648 net/tipc/link.c if (l->peer_caps & TIPC_LINK_PROTO_SEQNO) l 1649 net/tipc/link.c msg_set_seqno(hdr, l->snd_nxt_state++); l 1654 net/tipc/link.c if (l->peer_caps & TIPC_GAP_ACK_BLOCK) l 1655 net/tipc/link.c glen = tipc_build_gap_ack_blks(l, data); l 1656 net/tipc/link.c tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id); l 1659 net/tipc/link.c l->stats.sent_states++; l 1660 net/tipc/link.c l->rcv_unacked = 0; l 1665 net/tipc/link.c msg_set_dest_session(hdr, l->peer_session); l 1667 net/tipc/link.c msg_set_max_pkt(hdr, l->advertised_mtu); l 1668 net/tipc/link.c strcpy(data, l->if_name); l 1673 net/tipc/link.c l->stats.sent_probes++; l 1675 net/tipc/link.c l->stats.sent_nacks++; l 1678 net/tipc/link.c trace_tipc_proto_build(skb, false, l->name); l 1681 net/tipc/link.c void tipc_link_create_dummy_tnl_msg(struct tipc_link *l, l 1684 net/tipc/link.c u32 onode = tipc_own_addr(l->net); l 1688 net/tipc/link.c u32 dnode = l->addr; l 1701 net/tipc/link.c msg_set_bearer_id(hdr, l->peer_bearer_id); l 1708 net/tipc/link.c tipc_link_xmit(l, &tnlq, xmitq); l 1714 net/tipc/link.c void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, l 1720 net/tipc/link.c struct sk_buff_head *queue = &l->transmq; l 1722 net/tipc/link.c u16 pktlen, pktcnt, seqno = l->snd_nxt; l 1736 net/tipc/link.c BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net), l 1743 net/tipc/link.c tipc_link_xmit(l, &tnlq, &tmpxq); l 1753 net/tipc/link.c INT_H_SIZE, 0, l->addr, l 1754 net/tipc/link.c tipc_own_addr(l->net), l 1763 net/tipc/link.c syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1; l 1765 net/tipc/link.c msg_set_bearer_id(hdr, l->peer_bearer_id); l 1772 net/tipc/link.c tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL, l 1773 net/tipc/link.c mtyp, INT_H_SIZE, l->addr); l 1775 net/tipc/link.c pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq)); l 1777 net/tipc/link.c pktcnt = skb_queue_len(&l->transmq); l 1778 net/tipc/link.c pktcnt += skb_queue_len(&l->backlogq); l 1780 net/tipc/link.c msg_set_bearer_id(&tnlhdr, l->peer_bearer_id); l 1785 net/tipc/link.c if (queue == &l->backlogq) l 1831 net/tipc/link.c if (queue != &l->backlogq) { l 1832 net/tipc/link.c queue = &l->backlogq; l 1845 net/tipc/link.c tnl->drop_point = l->rcv_nxt; l 1846 net/tipc/link.c tnl->failover_reasm_skb = l->reasm_buf; l 1847 net/tipc/link.c l->reasm_buf = NULL; l 1855 net/tipc/link.c skb_queue_splice_init(&l->deferdq, fdefq); l 1869 net/tipc/link.c void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl, l 1896 net/tipc/link.c bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg 
*hdr) l 1898 net/tipc/link.c u16 curr_session = l->peer_session; l 1907 net/tipc/link.c if (!l->in_session) l 1912 net/tipc/link.c if (!l->in_session) l 1918 net/tipc/link.c if (!l->in_session) l 1923 net/tipc/link.c if (!link_is_up(l) && msg_ack(hdr)) l 1925 net/tipc/link.c if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO)) l 1928 net/tipc/link.c return !less(msg_seqno(hdr), l->rcv_nxt_state); l 1939 net/tipc/link.c static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, l 1950 net/tipc/link.c u16 rcv_nxt = l->rcv_nxt; l 1959 net/tipc/link.c trace_tipc_proto_rcv(skb, false, l->name); l 1960 net/tipc/link.c if (tipc_link_is_blocked(l) || !xmitq) l 1963 net/tipc/link.c if (tipc_own_addr(l->net) > msg_prevnode(hdr)) l 1964 net/tipc/link.c l->net_plane = msg_net_plane(hdr); l 1970 net/tipc/link.c if (!tipc_link_validate_msg(l, hdr)) { l 1972 net/tipc/link.c trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!"); l 1980 net/tipc/link.c if_name = strrchr(l->name, ':') + 1; l 1981 net/tipc/link.c if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME) l 1989 net/tipc/link.c l->tolerance = peers_tol; l 1990 net/tipc/link.c l->bc_rcvlink->tolerance = peers_tol; l 1993 net/tipc/link.c if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) l 1994 net/tipc/link.c l->priority = peers_prio; l 1998 net/tipc/link.c rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); l 2006 net/tipc/link.c l->session != msg_dest_session(hdr)) { l 2007 net/tipc/link.c if (less(l->session, msg_dest_session(hdr))) l 2008 net/tipc/link.c l->session = msg_dest_session(hdr) + 1; l 2013 net/tipc/link.c if (mtyp == RESET_MSG || !link_is_up(l)) l 2014 net/tipc/link.c rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); l 2017 net/tipc/link.c if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING) l 2020 net/tipc/link.c l->peer_session = msg_session(hdr); l 2021 net/tipc/link.c l->in_session = true; l 2022 net/tipc/link.c l->peer_bearer_id = msg_bearer_id(hdr); l 2023 net/tipc/link.c if (l->mtu > msg_max_pkt(hdr)) l 2024 net/tipc/link.c l->mtu = msg_max_pkt(hdr); l 2028 net/tipc/link.c l->rcv_nxt_state = msg_seqno(hdr) + 1; l 2032 net/tipc/link.c l->tolerance = peers_tol; l 2033 net/tipc/link.c l->bc_rcvlink->tolerance = peers_tol; l 2036 net/tipc/link.c if ((peers_prio != l->priority) && l 2038 net/tipc/link.c l->priority = peers_prio; l 2039 net/tipc/link.c rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); l 2042 net/tipc/link.c l->silent_intv_cnt = 0; l 2043 net/tipc/link.c l->stats.recv_states++; l 2045 net/tipc/link.c l->stats.recv_probes++; l 2047 net/tipc/link.c if (!link_is_up(l)) { l 2048 net/tipc/link.c if (l->state == LINK_ESTABLISHING) l 2054 net/tipc/link.c if (l->peer_caps & TIPC_GAP_ACK_BLOCK) { l 2062 net/tipc/link.c tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr, l 2063 net/tipc/link.c &l->mon_state, l->bearer_id); l 2066 net/tipc/link.c if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) l 2067 net/tipc/link.c rcvgap = peers_snd_nxt - l->rcv_nxt; l 2069 net/tipc/link.c tipc_link_build_proto_msg(l, STATE_MSG, 0, reply, l 2072 net/tipc/link.c rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq); l 2076 net/tipc/link.c l->stats.recv_nacks++; l 2078 net/tipc/link.c tipc_link_advance_backlog(l, xmitq); l 2079 net/tipc/link.c if (unlikely(!skb_queue_empty(&l->wakeupq))) l 2080 net/tipc/link.c link_prepare_wakeup(l); l 2089 net/tipc/link.c static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast, l 2095 net/tipc/link.c struct sk_buff *dfrd_skb = 
skb_peek(&l->deferdq); l 2096 net/tipc/link.c u16 ack = l->rcv_nxt - 1; l 2100 net/tipc/link.c 0, l->addr, tipc_own_addr(l->net), 0, 0, 0); l 2104 net/tipc/link.c msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); l 2120 net/tipc/link.c static void tipc_link_build_bc_init_msg(struct tipc_link *l, l 2126 net/tipc/link.c if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list)) l 2129 net/tipc/link.c tipc_link_xmit(l, &list, xmitq); l 2134 net/tipc/link.c void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr) l 2139 net/tipc/link.c if (link_is_up(l)) l 2143 net/tipc/link.c l->rcv_nxt = peers_snd_nxt; l 2144 net/tipc/link.c l->state = LINK_ESTABLISHED; l 2148 net/tipc/link.c if (l->peer_caps & TIPC_BCAST_SYNCH) l 2156 net/tipc/link.c l->rcv_nxt = peers_snd_nxt; l 2161 net/tipc/link.c int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, l 2164 net/tipc/link.c struct tipc_link *snd_l = l->bc_sndlink; l 2170 net/tipc/link.c if (!link_is_up(l)) l 2178 net/tipc/link.c l->bc_peer_is_up = true; l 2180 net/tipc/link.c if (!l->bc_peer_is_up) l 2183 net/tipc/link.c l->stats.recv_nacks++; l 2186 net/tipc/link.c if (more(peers_snd_nxt, l->rcv_nxt + l->window)) l 2189 net/tipc/link.c rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq); l 2191 net/tipc/link.c l->snd_nxt = peers_snd_nxt; l 2192 net/tipc/link.c if (link_bc_rcv_gap(l)) l 2196 net/tipc/link.c if (l->peer_caps & TIPC_BCAST_STATE_NACK) l 2201 net/tipc/link.c if (!more(peers_snd_nxt, l->rcv_nxt)) { l 2202 net/tipc/link.c l->nack_state = BC_NACK_SND_CONDITIONAL; l 2207 net/tipc/link.c if (l->nack_state == BC_NACK_SND_SUPPRESS) { l 2208 net/tipc/link.c l->nack_state = BC_NACK_SND_UNCONDITIONAL; l 2213 net/tipc/link.c if (l->nack_state == BC_NACK_SND_CONDITIONAL) { l 2214 net/tipc/link.c l->nack_state = BC_NACK_SND_UNCONDITIONAL; l 2215 net/tipc/link.c if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN) l 2220 net/tipc/link.c tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq); l 2221 net/tipc/link.c l->nack_state = BC_NACK_SND_SUPPRESS; l 2225 net/tipc/link.c void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, l 2229 net/tipc/link.c struct tipc_link *snd_l = l->bc_sndlink; l 2231 net/tipc/link.c if (!link_is_up(l) || !l->bc_peer_is_up) l 2234 net/tipc/link.c if (!more(acked, l->acked)) l 2237 net/tipc/link.c trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq); l 2240 net/tipc/link.c if (more(buf_seqno(skb), l->acked)) l 2253 net/tipc/link.c l->acked = acked; l 2263 net/tipc/link.c int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb, l 2277 net/tipc/link.c if (!tipc_link_is_up(l) || !l->bc_peer_is_up) l 2283 net/tipc/link.c if (dnode == tipc_own_addr(l->net)) { l 2284 net/tipc/link.c tipc_link_bc_ack_rcv(l, acked, xmitq); l 2285 net/tipc/link.c rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq); l 2286 net/tipc/link.c l->stats.recv_nacks++; l 2291 net/tipc/link.c if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from)) l 2292 net/tipc/link.c l->nack_state = BC_NACK_SND_SUPPRESS; l 2297 net/tipc/link.c void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) l 2299 net/tipc/link.c int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE); l 2301 net/tipc/link.c l->window = win; l 2302 net/tipc/link.c l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win); l 2303 net/tipc/link.c l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2); l 2304 net/tipc/link.c l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3); l 2305 
net/tipc/link.c l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4); l 2306 net/tipc/link.c l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; l 2313 net/tipc/link.c void tipc_link_reset_stats(struct tipc_link *l) l 2315 net/tipc/link.c memset(&l->stats, 0, sizeof(l->stats)); l 2318 net/tipc/link.c static void link_print(struct tipc_link *l, const char *str) l 2320 net/tipc/link.c struct sk_buff *hskb = skb_peek(&l->transmq); l 2321 net/tipc/link.c u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1; l 2322 net/tipc/link.c u16 tail = l->snd_nxt - 1; l 2324 net/tipc/link.c pr_info("%s Link <%s> state %x\n", str, l->name, l->state); l 2326 net/tipc/link.c skb_queue_len(&l->transmq), head, tail, l 2327 net/tipc/link.c skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt); l 2628 net/tipc/link.c void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, l 2631 net/tipc/link.c l->tolerance = tol; l 2632 net/tipc/link.c if (l->bc_rcvlink) l 2633 net/tipc/link.c l->bc_rcvlink->tolerance = tol; l 2634 net/tipc/link.c if (link_is_up(l)) l 2635 net/tipc/link.c tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq); l 2638 net/tipc/link.c void tipc_link_set_prio(struct tipc_link *l, u32 prio, l 2641 net/tipc/link.c l->priority = prio; l 2642 net/tipc/link.c tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq); l 2645 net/tipc/link.c void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit) l 2647 net/tipc/link.c l->abort_limit = limit; l 2650 net/tipc/link.c char *tipc_link_name_ext(struct tipc_link *l, char *buf) l 2652 net/tipc/link.c if (!l) l 2654 net/tipc/link.c else if (link_is_bc_sndlink(l)) l 2656 net/tipc/link.c else if (link_is_bc_rcvlink(l)) l 2658 net/tipc/link.c "broadcast-receiver, peer %x", l->addr); l 2660 net/tipc/link.c memcpy(buf, l->name, TIPC_MAX_LINK_NAME); l 2678 net/tipc/link.c int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf) l 2686 net/tipc/link.c if (!l) { l 2691 net/tipc/link.c i += scnprintf(buf, sz, "link data: %x", l->addr); l 2692 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %x", l->state); l 2693 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->in_session); l 2694 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->session); l 2695 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->peer_session); l 2696 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt); l 2697 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt); l 2698 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state); l 2699 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state); l 2700 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %x", l->peer_caps); l 2701 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt); l 2702 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt); l 2705 net/tipc/link.c i += scnprintf(buf + i, sz - i, " %u", l->acked); l 2707 net/tipc/link.c list = &l->transmq; l 2715 net/tipc/link.c list = &l->deferdq; l 2723 net/tipc/link.c list = &l->backlogq; l 2731 net/tipc/link.c list = l->inputq; l 2741 net/tipc/link.c i += tipc_list_dump(&l->transmq, false, buf + i); l 2746 net/tipc/link.c l->backlog[TIPC_LOW_IMPORTANCE].len, l 2747 net/tipc/link.c l->backlog[TIPC_MEDIUM_IMPORTANCE].len, l 2748 net/tipc/link.c l->backlog[TIPC_HIGH_IMPORTANCE].len, l 2749 net/tipc/link.c l->backlog[TIPC_CRITICAL_IMPORTANCE].len, l 2750 net/tipc/link.c l->backlog[TIPC_SYSTEM_IMPORTANCE].len); l 2751 net/tipc/link.c i 
+= tipc_list_dump(&l->backlogq, false, buf + i); l 2755 net/tipc/link.c i += tipc_list_dump(&l->deferdq, false, buf + i); l 2759 net/tipc/link.c i += tipc_list_dump(l->inputq, false, buf + i); l 2763 net/tipc/link.c i += tipc_list_dump(&l->wakeupq, false, buf + i); l 89 net/tipc/link.h void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, l 93 net/tipc/link.h void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl, l 95 net/tipc/link.h void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq); l 96 net/tipc/link.h int tipc_link_fsm_evt(struct tipc_link *l, int evt); l 97 net/tipc/link.h bool tipc_link_is_up(struct tipc_link *l); l 98 net/tipc/link.h bool tipc_link_peer_is_down(struct tipc_link *l); l 99 net/tipc/link.h bool tipc_link_is_reset(struct tipc_link *l); l 100 net/tipc/link.h bool tipc_link_is_establishing(struct tipc_link *l); l 101 net/tipc/link.h bool tipc_link_is_synching(struct tipc_link *l); l 102 net/tipc/link.h bool tipc_link_is_failingover(struct tipc_link *l); l 103 net/tipc/link.h bool tipc_link_is_blocked(struct tipc_link *l); l 104 net/tipc/link.h void tipc_link_set_active(struct tipc_link *l, bool active); l 105 net/tipc/link.h void tipc_link_reset(struct tipc_link *l); l 106 net/tipc/link.h void tipc_link_reset_stats(struct tipc_link *l); l 109 net/tipc/link.h struct sk_buff_head *tipc_link_inputq(struct tipc_link *l); l 110 net/tipc/link.h u16 tipc_link_rcv_nxt(struct tipc_link *l); l 111 net/tipc/link.h u16 tipc_link_acked(struct tipc_link *l); l 112 net/tipc/link.h u32 tipc_link_id(struct tipc_link *l); l 113 net/tipc/link.h char *tipc_link_name(struct tipc_link *l); l 114 net/tipc/link.h char *tipc_link_name_ext(struct tipc_link *l, char *buf); l 115 net/tipc/link.h u32 tipc_link_state(struct tipc_link *l); l 116 net/tipc/link.h char tipc_link_plane(struct tipc_link *l); l 117 net/tipc/link.h int tipc_link_prio(struct tipc_link *l); l 118 net/tipc/link.h int tipc_link_window(struct tipc_link *l); l 119 net/tipc/link.h void tipc_link_update_caps(struct tipc_link *l, u16 capabilities); l 120 net/tipc/link.h bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr); l 121 net/tipc/link.h unsigned long tipc_link_tolerance(struct tipc_link *l); l 122 net/tipc/link.h void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, l 124 net/tipc/link.h void tipc_link_set_prio(struct tipc_link *l, u32 prio, l 126 net/tipc/link.h void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit); l 127 net/tipc/link.h void tipc_link_set_queue_limits(struct tipc_link *l, u32 window); l 131 net/tipc/link.h int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq); l 132 net/tipc/link.h int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, l 134 net/tipc/link.h int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq); l 141 net/tipc/link.h int tipc_link_bc_peers(struct tipc_link *l); l 142 net/tipc/link.h void tipc_link_set_mtu(struct tipc_link *l, int mtu); l 143 net/tipc/link.h int tipc_link_mtu(struct tipc_link *l); l 144 net/tipc/link.h void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, l 146 net/tipc/link.h void tipc_link_build_bc_sync_msg(struct tipc_link *l, l 148 net/tipc/link.h void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr); l 149 net/tipc/link.h int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, l 151 net/tipc/link.h int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb, l 153 net/tipc/link.h bool 
tipc_link_too_silent(struct tipc_link *l); l 983 net/tipc/name_table.c struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) l 987 net/tipc/name_table.c list_for_each_entry(dst, l, list) { l 994 net/tipc/name_table.c bool tipc_dest_push(struct list_head *l, u32 node, u32 port) l 998 net/tipc/name_table.c if (tipc_dest_find(l, node, port)) l 1006 net/tipc/name_table.c list_add(&dst->list, l); l 1010 net/tipc/name_table.c bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port) l 1014 net/tipc/name_table.c if (list_empty(l)) l 1016 net/tipc/name_table.c dst = list_first_entry(l, typeof(*dst), list); l 1026 net/tipc/name_table.c bool tipc_dest_del(struct list_head *l, u32 node, u32 port) l 1030 net/tipc/name_table.c dst = tipc_dest_find(l, node, port); l 1038 net/tipc/name_table.c void tipc_dest_list_purge(struct list_head *l) l 1042 net/tipc/name_table.c list_for_each_entry_safe(dst, tmp, l, list) { l 1048 net/tipc/name_table.c int tipc_dest_list_len(struct list_head *l) l 1053 net/tipc/name_table.c list_for_each_entry(dst, l, list) { l 141 net/tipc/name_table.h struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); l 142 net/tipc/name_table.h bool tipc_dest_push(struct list_head *l, u32 node, u32 port); l 143 net/tipc/name_table.h bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port); l 144 net/tipc/name_table.h bool tipc_dest_del(struct list_head *l, u32 node, u32 port); l 145 net/tipc/name_table.h void tipc_dest_list_purge(struct list_head *l); l 146 net/tipc/name_table.h int tipc_dest_list_len(struct list_head *l); l 368 net/tipc/node.c struct tipc_link *l; l 381 net/tipc/node.c l = n->links[bearer_id].link; l 382 net/tipc/node.c if (l) l 383 net/tipc/node.c tipc_link_update_caps(l, capabilities); l 453 net/tipc/node.c static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) l 455 net/tipc/node.c unsigned long tol = tipc_link_tolerance(l); l 463 net/tipc/node.c tipc_link_set_abort_limit(l, tol / n->keepalive_intv); l 778 net/tipc/node.c static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l, l 787 net/tipc/node.c if (l && !tipc_link_is_reset(l)) l 794 net/tipc/node.c tipc_link_failover_prepare(l, tnl, xmitq); l 796 net/tipc/node.c if (l) l 797 net/tipc/node.c tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); l 812 net/tipc/node.c struct tipc_link *l, *_l, *tnl; l 814 net/tipc/node.c l = n->links[*bearer_id].link; l 815 net/tipc/node.c if (!l || tipc_link_is_reset(l)) l 820 net/tipc/node.c n->link_id = tipc_link_id(l); l 825 net/tipc/node.c tipc_link_name(l), tipc_link_plane(l)); l 834 net/tipc/node.c if (_l == l) l 849 net/tipc/node.c if (tipc_link_peer_is_down(l)) l 852 net/tipc/node.c trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!"); l 853 net/tipc/node.c tipc_link_fsm_evt(l, LINK_RESET_EVT); l 854 net/tipc/node.c tipc_link_reset(l); l 855 net/tipc/node.c tipc_link_build_reset_msg(l, xmitq); l 869 net/tipc/node.c tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq); l 870 net/tipc/node.c trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!"); l 871 net/tipc/node.c tipc_link_reset(l); l 872 net/tipc/node.c tipc_link_fsm_evt(l, LINK_RESET_EVT); l 873 net/tipc/node.c tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); l 882 net/tipc/node.c struct tipc_link *l = le->link; l 886 net/tipc/node.c if (!l) l 892 net/tipc/node.c if (!tipc_link_is_establishing(l)) { l 896 net/tipc/node.c tipc_link_reset(l); l 897 net/tipc/node.c tipc_link_fsm_evt(l, LINK_RESET_EVT); l 900 net/tipc/node.c 
kfree(l); l 987 net/tipc/node.c struct tipc_link *l; l 1010 net/tipc/node.c l = le->link; l 1011 net/tipc/node.c link_up = l && tipc_link_is_up(l); l 1012 net/tipc/node.c addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr)); l 1078 net/tipc/node.c if (!l) { l 1091 net/tipc/node.c &n->bc_entry.namedq, &l)) { l 1095 net/tipc/node.c trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!"); l 1096 net/tipc/node.c tipc_link_reset(l); l 1097 net/tipc/node.c tipc_link_fsm_evt(l, LINK_RESET_EVT); l 1099 net/tipc/node.c tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); l 1100 net/tipc/node.c le->link = l; l 1102 net/tipc/node.c tipc_node_calculate_timer(n, l); l 1112 net/tipc/node.c if (reset && l && !tipc_link_is_reset(l)) l 1324 net/tipc/node.c struct tipc_link *l; l 1338 net/tipc/node.c l = n->links[i].link; l 1339 net/tipc/node.c if (l) l 1340 net/tipc/node.c tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT); l 1657 net/tipc/node.c struct tipc_link *l, *tnl, *pl = NULL; l 1665 net/tipc/node.c l = n->links[bearer_id].link; l 1666 net/tipc/node.c if (!l) l 1668 net/tipc/node.c rcv_nxt = tipc_link_rcv_nxt(l); l 1682 net/tipc/node.c if (!tipc_link_validate_msg(l, hdr)) { l 1684 net/tipc/node.c trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!"); l 1690 net/tipc/node.c if (!tipc_link_is_up(l)) l 1719 net/tipc/node.c tipc_link_inputq(l)); l 1729 net/tipc/node.c tipc_node_link_failover(n, pl, l, xmitq); l 1737 net/tipc/node.c if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) { l 1756 net/tipc/node.c if (!tipc_link_is_up(l)) l 1760 net/tipc/node.c tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT); l 1767 net/tipc/node.c if (tipc_link_is_synching(l)) { l 1768 net/tipc/node.c tnl = l; l 1771 net/tipc/node.c pl = l; l 1780 net/tipc/node.c if (l == pl) l 2052 net/tipc/node.c struct tipc_link *l; l 2062 net/tipc/node.c l = n->links[i].link; l 2063 net/tipc/node.c if (l && !strcmp(tipc_link_name(l), link_name)) { l 132 net/tipc/trace.h int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf); l 246 net/tipc/trace.h TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), l 248 net/tipc/trace.h TP_ARGS(l, dqueues, header), l 258 net/tipc/trace.h tipc_link_name_ext(l, __entry->name); l 259 net/tipc/trace.h tipc_link_dump(l, dqueues, __get_str(buf)); l 268 net/tipc/trace.h TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \ l 269 net/tipc/trace.h TP_ARGS(l, dqueues, header)) l 277 net/tipc/trace.h TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \ l 278 net/tipc/trace.h TP_ARGS(l, dqueues, header), \ l 280 net/tipc/trace.h DEFINE_LINK_EVENT_COND(tipc_link_too_silent, tipc_link_too_silent(l)); l 2787 net/xfrm/xfrm_user.c unsigned int l = 0; l 2789 net/xfrm/xfrm_user.c l += nla_total_size(aead_len(x->aead)); l 2791 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(struct xfrm_algo) + l 2793 net/xfrm/xfrm_user.c l += nla_total_size(xfrm_alg_auth_len(x->aalg)); l 2796 net/xfrm/xfrm_user.c l += nla_total_size(xfrm_alg_len(x->ealg)); l 2798 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(*x->calg)); l 2800 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(*x->encap)); l 2802 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(x->tfcpad)); l 2804 net/xfrm/xfrm_user.c l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn)); l 2806 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(struct xfrm_replay_state)); l 2808 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) + l 2811 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(*x->coaddr)); l 2813 
net/xfrm/xfrm_user.c l += nla_total_size(sizeof(x->props.extra_flags)); l 2815 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(x->xso)); l 2817 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(x->props.smark.v)); l 2818 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(x->props.smark.m)); l 2821 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(x->if_id)); l 2824 net/xfrm/xfrm_user.c l += nla_total_size_64bit(sizeof(u64)); l 2826 net/xfrm/xfrm_user.c return l; l 386 samples/bpf/bpf_load.c static int cmp_symbols(const void *l, const void *r) l 388 samples/bpf/bpf_load.c const GElf_Sym *lsym = (const GElf_Sym *)l; l 177 samples/bpf/test_lru_dist.c char *b, *l; l 199 samples/bpf/test_lru_dist.c for (l = strtok(b, "\n"); l; l = strtok(NULL, "\n")) l 200 samples/bpf/test_lru_dist.c retkeys[counts++] = strtoull(l, NULL, 10); l 55 samples/bpf/tracex3_kern.c u64 *value, l, base; l 75 samples/bpf/tracex3_kern.c l = log2l(delta); l 76 samples/bpf/tracex3_kern.c base = 1ll << l; l 77 samples/bpf/tracex3_kern.c index = (l * 64 + (delta - base) * 64 / base) * 3 / 64; l 29 samples/seccomp/bpf-fancy.c struct bpf_labels l = { l 40 samples/seccomp/bpf-fancy.c SYSCALL(__NR_write, JUMP(&l, write_fd)), l 41 samples/seccomp/bpf-fancy.c SYSCALL(__NR_read, JUMP(&l, read)), l 44 samples/seccomp/bpf-fancy.c LABEL(&l, read), l 53 samples/seccomp/bpf-fancy.c LABEL(&l, write_fd), l 55 samples/seccomp/bpf-fancy.c JEQ(STDOUT_FILENO, JUMP(&l, write_buf)), l 56 samples/seccomp/bpf-fancy.c JEQ(STDERR_FILENO, JUMP(&l, write_buf)), l 59 samples/seccomp/bpf-fancy.c LABEL(&l, write_buf), l 61 samples/seccomp/bpf-fancy.c JEQ((unsigned long)msg1, JUMP(&l, msg1_len)), l 62 samples/seccomp/bpf-fancy.c JEQ((unsigned long)msg2, JUMP(&l, msg2_len)), l 63 samples/seccomp/bpf-fancy.c JEQ((unsigned long)buf, JUMP(&l, buf_len)), l 66 samples/seccomp/bpf-fancy.c LABEL(&l, msg1_len), l 71 samples/seccomp/bpf-fancy.c LABEL(&l, msg2_len), l 76 samples/seccomp/bpf-fancy.c LABEL(&l, buf_len), l 86 samples/seccomp/bpf-fancy.c bpf_resolve_jumps(&l, filter, sizeof(filter)/sizeof(*filter)); l 219 scripts/dtc/checks.c int rem, l; l 231 scripts/dtc/checks.c l = strnlen(str, rem); l 232 scripts/dtc/checks.c if (l == rem) { l 236 scripts/dtc/checks.c rem -= l + 1; l 237 scripts/dtc/checks.c str += l + 1; l 444 scripts/dtc/checks.c struct label *l; l 447 scripts/dtc/checks.c for_each_label(node->labels, l) l 448 scripts/dtc/checks.c check_duplicate_label(c, dti, l->label, node, NULL, NULL); l 453 scripts/dtc/checks.c for_each_label(prop->labels, l) l 454 scripts/dtc/checks.c check_duplicate_label(c, dti, l->label, node, prop, NULL); l 172 scripts/dtc/dtc.h #define for_each_label_withdel(l0, l) \ l 173 scripts/dtc/dtc.h for ((l) = (l0); (l); (l) = (l)->next) l 175 scripts/dtc/dtc.h #define for_each_label(l0, l) \ l 176 scripts/dtc/dtc.h for_each_label_withdel(l0, l) \ l 177 scripts/dtc/dtc.h if (!(l)->deleted) l 174 scripts/dtc/flattree.c struct label *l; l 176 scripts/dtc/flattree.c for_each_label(labels, l) { l 177 scripts/dtc/flattree.c fprintf(f, "\t.globl\t%s\n", l->label); l 178 scripts/dtc/flattree.c fprintf(f, "%s:\n", l->label); l 187 scripts/dtc/flattree.c struct label *l; l 191 scripts/dtc/flattree.c for_each_label(labels, l) { l 192 scripts/dtc/flattree.c fprintf(f, "\t.globl\t%s_end\n", l->label); l 193 scripts/dtc/flattree.c fprintf(f, "%s_end:\n", l->label); l 200 scripts/dtc/flattree.c struct label *l; l 202 scripts/dtc/flattree.c for_each_label(labels, l) { l 203 scripts/dtc/flattree.c fprintf(f, "\t.globl\t%s\n", l->label); l 204 
scripts/dtc/flattree.c fprintf(f, "%s:\n", l->label); l 517 scripts/dtc/flattree.c struct label *l; l 519 scripts/dtc/flattree.c for_each_label(re->labels, l) { l 520 scripts/dtc/flattree.c fprintf(f, "\t.globl\t%s\n", l->label); l 521 scripts/dtc/flattree.c fprintf(f, "%s:\n", l->label); l 146 scripts/dtc/livetree.c struct label *l; l 151 scripts/dtc/livetree.c for_each_label_withdel(new_node->labels, l) l 152 scripts/dtc/livetree.c add_label(&old_node->labels, l->label); l 172 scripts/dtc/livetree.c for_each_label_withdel(new_prop->labels, l) l 173 scripts/dtc/livetree.c add_label(&old_prop->labels, l->label); l 456 scripts/dtc/livetree.c struct label *l; l 458 scripts/dtc/livetree.c for_each_label(prop->labels, l) l 459 scripts/dtc/livetree.c if (streq(l->label, label)) l 542 scripts/dtc/livetree.c struct label *l; l 546 scripts/dtc/livetree.c for_each_label(tree->labels, l) l 547 scripts/dtc/livetree.c if (streq(l->label, label)) l 831 scripts/dtc/livetree.c struct label *l; l 837 scripts/dtc/livetree.c for_each_label(node->labels, l) { l 840 scripts/dtc/livetree.c p = get_property(an, l->label); l 843 scripts/dtc/livetree.c " exists in /%s", l->label, l 849 scripts/dtc/livetree.c p = build_property(l->label, l 397 scripts/dtc/srcpos.c void srcpos_set_line(char *f, int l) l 400 scripts/dtc/srcpos.c current_srcfile->lineno = l; l 101 scripts/dtc/srcpos.h extern void srcpos_set_line(char *f, int l); l 285 scripts/dtc/treesource.c struct label *l; l 289 scripts/dtc/treesource.c for_each_label(tree->labels, l) l 290 scripts/dtc/treesource.c fprintf(f, "%s: ", l->label); l 307 scripts/dtc/treesource.c for_each_label(prop->labels, l) l 308 scripts/dtc/treesource.c fprintf(f, "%s: ", l->label); l 335 scripts/dtc/treesource.c struct label *l; l 337 scripts/dtc/treesource.c for_each_label(re->labels, l) l 338 scripts/dtc/treesource.c fprintf(f, "%s: ", l->label); l 434 scripts/dtc/util.c int l = strlen(long_opts[i].name) + 1; l 436 scripts/dtc/util.c l += a_arg_len; l 437 scripts/dtc/util.c if (optlen < l) l 438 scripts/dtc/util.c optlen = l; l 36 scripts/extract-cert.c static void display_openssl_errors(int l) l 44 scripts/extract-cert.c fprintf(stderr, "At main.c:%d:\n", l); l 70 scripts/genksyms/keywords.c int l = strlen(r->name); l 71 scripts/genksyms/keywords.c if (len == l && !memcmp(str, r->name, len)) l 96 scripts/insert-sys-cert.c char l[LINE_SIZE]; l 106 scripts/insert-sys-cert.c while (fgets(l, LINE_SIZE, f)) { l 107 scripts/insert-sys-cert.c p = strchr(l, '\n'); l 112 scripts/insert-sys-cert.c n = strstr(l, name); l 120 scripts/insert-sys-cert.c w = strchr(l, ' '); l 125 scripts/insert-sys-cert.c s->address = strtoul(l, NULL, 16); l 256 scripts/kallsyms.c int l = strlen(special_prefixes[i]); l 258 scripts/kallsyms.c if (l <= strlen(sym_name) && l 259 scripts/kallsyms.c strncmp(sym_name, special_prefixes[i], l) == 0) l 264 scripts/kallsyms.c int l = strlen(sym_name) - strlen(special_suffixes[i]); l 266 scripts/kallsyms.c if (l >= 0 && strcmp(sym_name + l, special_suffixes[i]) == 0) l 59 scripts/kconfig/conf.c int l; l 63 scripts/kconfig/conf.c l = strlen(p); l 65 scripts/kconfig/conf.c memmove(str, p, l + 1); l 66 scripts/kconfig/conf.c if (!l) l 68 scripts/kconfig/conf.c p = str + l - 1; l 617 scripts/kconfig/confdata.c size_t l; l 620 scripts/kconfig/confdata.c l = strcspn(p, "\n"); l 622 scripts/kconfig/confdata.c if (l) { l 624 scripts/kconfig/confdata.c xfwrite(p, l, 1, fp); l 625 scripts/kconfig/confdata.c p += l; l 689 scripts/kconfig/confdata.c size_t l; l 693 
scripts/kconfig/confdata.c l = strcspn(p, "\n"); l 695 scripts/kconfig/confdata.c if (l) { l 697 scripts/kconfig/confdata.c xfwrite(p, l, 1, fp); l 698 scripts/kconfig/confdata.c p += l; l 51 scripts/kconfig/expr.h #define expr_list_for_each_sym(l, e, s) \ l 52 scripts/kconfig/expr.h for (e = (l); e && (s = e->right.sym); e = e->left.expr) l 875 scripts/kconfig/symbol.c size_t l; l 881 scripts/kconfig/symbol.c l = strcspn(p, "\"\\"); l 882 scripts/kconfig/symbol.c p += l; l 898 scripts/kconfig/symbol.c l = strcspn(p, "\"\\"); l 899 scripts/kconfig/symbol.c strncat(res, p, l); l 900 scripts/kconfig/symbol.c p += l; l 54 scripts/kconfig/util.c size_t l; l 56 scripts/kconfig/util.c l = strlen(gs->s) + strlen(s) + 1; l 57 scripts/kconfig/util.c if (l > gs->len) { l 58 scripts/kconfig/util.c gs->s = xrealloc(gs->s, l); l 59 scripts/kconfig/util.c gs->len = l; l 95 scripts/selinux/genheaders/genheaders.c int len = strlen(map->name), l = sizeof(s) - 1; l 96 scripts/selinux/genheaders/genheaders.c if (len >= l && memcmp(map->name + len - l, s, l) == 0) l 79 scripts/sign-file.c static void display_openssl_errors(int l) l 87 scripts/sign-file.c fprintf(stderr, "At main.c:%d:\n", l); l 2103 security/apparmor/apparmorfs.c loff_t l = *pos; l 2111 security/apparmor/apparmorfs.c for (; profile && l > 0; l--) l 490 security/apparmor/file.c struct aa_label *l, *old; l 496 security/apparmor/file.c l = aa_label_merge(old, label, GFP_ATOMIC); l 497 security/apparmor/file.c if (l) { l 498 security/apparmor/file.c if (l != old) { l 499 security/apparmor/file.c rcu_assign_pointer(fctx->label, l); l 502 security/apparmor/file.c aa_put_label(l); l 103 security/apparmor/include/cred.h struct aa_label *l = aa_current_raw_label(); l 105 security/apparmor/include/cred.h if (label_is_stale(l)) l 106 security/apparmor/include/cred.h return aa_get_newest_label(l); l 107 security/apparmor/include/cred.h return aa_get_label(l); l 154 security/apparmor/include/label.h int aa_label_next_confined(struct aa_label *l, int i); l 288 security/apparmor/include/label.h struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *l); l 293 security/apparmor/include/label.h struct aa_label *aa_label_find(struct aa_label *l); l 372 security/apparmor/include/label.h static inline struct aa_label *__aa_get_label(struct aa_label *l) l 374 security/apparmor/include/label.h if (l && kref_get_unless_zero(&l->count)) l 375 security/apparmor/include/label.h return l; l 380 security/apparmor/include/label.h static inline struct aa_label *aa_get_label(struct aa_label *l) l 382 security/apparmor/include/label.h if (l) l 383 security/apparmor/include/label.h kref_get(&(l->count)); l 385 security/apparmor/include/label.h return l; l 396 security/apparmor/include/label.h static inline struct aa_label *aa_get_label_rcu(struct aa_label __rcu **l) l 402 security/apparmor/include/label.h c = rcu_dereference(*l); l 417 security/apparmor/include/label.h static inline struct aa_label *aa_get_newest_label(struct aa_label *l) l 419 security/apparmor/include/label.h if (!l) l 422 security/apparmor/include/label.h if (label_is_stale(l)) { l 425 security/apparmor/include/label.h AA_BUG(!l->proxy); l 426 security/apparmor/include/label.h AA_BUG(!l->proxy->label); l 431 security/apparmor/include/label.h tmp = aa_get_label_rcu(&l->proxy->label); l 437 security/apparmor/include/label.h return aa_get_label(l); l 440 security/apparmor/include/label.h static inline void aa_put_label(struct aa_label *l) l 442 security/apparmor/include/label.h if (l) l 443 
security/apparmor/include/label.h kref_put(&l->count, aa_label_kref); l 447 security/apparmor/include/label.h struct aa_proxy *aa_alloc_proxy(struct aa_label *l, gfp_t gfp); l 112 security/apparmor/include/policy_unpack.h bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r); l 779 security/apparmor/label.c struct aa_label *l; l 789 security/apparmor/label.c l = __label_insert(ls, new, true); l 790 security/apparmor/label.c res = (l == new); l 792 security/apparmor/label.c aa_put_label(l); l 900 security/apparmor/label.c struct aa_label *l; l 909 security/apparmor/label.c l = __label_find(label); l 911 security/apparmor/label.c if (l) l 912 security/apparmor/label.c return l; l 916 security/apparmor/label.c l = __label_insert(ls, label, false); l 919 security/apparmor/label.c return l; l 2103 security/apparmor/label.c struct aa_label *l = __label_update(label); l 2105 security/apparmor/label.c aa_put_label(l); l 111 security/apparmor/policy.c struct aa_label *l; l 121 security/apparmor/policy.c l = aa_label_insert(&profile->ns->labels, &profile->label); l 122 security/apparmor/policy.c AA_BUG(l != &profile->label); l 123 security/apparmor/policy.c aa_put_label(l); l 138 security/apparmor/policy_unpack.c bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r) l 140 security/apparmor/policy_unpack.c if (l->size != r->size) l 142 security/apparmor/policy_unpack.c if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0) l 144 security/apparmor/policy_unpack.c return memcmp(l->data, r->data, r->size) == 0; l 80 security/integrity/ima/ima_fs.c loff_t l = *pos; l 86 security/integrity/ima/ima_fs.c if (!l--) { l 1330 security/integrity/ima/ima_policy.c loff_t l = *pos; l 1335 security/integrity/ima/ima_policy.c if (!l--) { l 163 security/keys/big_key.c unsigned int i, l; l 181 security/keys/big_key.c l = min_t(size_t, len, PAGE_SIZE); l 182 security/keys/big_key.c sg_set_page(&buf->sg[i], buf->pages[i], l, 0); l 183 security/keys/big_key.c len -= l; l 438 security/selinux/hooks.c static int match_opt_prefix(char *s, int l, char **arg) l 444 security/selinux/hooks.c if (len > l || memcmp(s, tokens[i].name, len)) l 447 security/selinux/hooks.c if (len == l || s[len] != '=') l 450 security/selinux/hooks.c } else if (len != l) l 38 security/selinux/ss/mls.c int i, l, len, head, prev; l 47 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 48 security/selinux/ss/mls.c int index_sens = context->range.level[l].sens; l 54 security/selinux/ss/mls.c e = &context->range.level[l].cat; l 72 security/selinux/ss/mls.c if (l == 0) { l 94 security/selinux/ss/mls.c int i, l, head, prev; l 106 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 108 security/selinux/ss/mls.c context->range.level[l].sens - 1)); l 114 security/selinux/ss/mls.c e = &context->range.level[l].cat; l 149 security/selinux/ss/mls.c if (l == 0) { l 162 security/selinux/ss/mls.c int mls_level_isvalid(struct policydb *p, struct mls_level *l) l 166 security/selinux/ss/mls.c if (!l->sens || l->sens > p->p_levels.nprim) l 169 security/selinux/ss/mls.c sym_name(p, SYM_LEVELS, l->sens - 1)); l 178 security/selinux/ss/mls.c return ebitmap_contains(&levdatum->level->cat, &l->cat, l 244 security/selinux/ss/mls.c int l, rc, i; l 286 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 288 security/selinux/ss/mls.c sensitivity = rangep[l]; l 299 security/selinux/ss/mls.c context->range.level[l].sens = levdatum->level->sens; l 319 security/selinux/ss/mls.c rc = ebitmap_set_bit(&context->range.level[l].cat, l 336 
security/selinux/ss/mls.c rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1); l 388 security/selinux/ss/mls.c int l, rc = 0; l 391 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 392 security/selinux/ss/mls.c context->range.level[l].sens = range->level[l].sens; l 393 security/selinux/ss/mls.c rc = ebitmap_cpy(&context->range.level[l].cat, l 394 security/selinux/ss/mls.c &range->level[l].cat); l 455 security/selinux/ss/mls.c int l, i; l 460 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 463 security/selinux/ss/mls.c oldc->range.level[l].sens - 1)); l 467 security/selinux/ss/mls.c newc->range.level[l].sens = levdatum->level->sens; l 469 security/selinux/ss/mls.c ebitmap_for_each_positive_bit(&oldc->range.level[l].cat, l 477 security/selinux/ss/mls.c rc = ebitmap_set_bit(&newc->range.level[l].cat, l 33 security/selinux/ss/mls.h int mls_level_isvalid(struct policydb *p, struct mls_level *l); l 1960 security/selinux/ss/policydb.c struct ocontext *l, *c; l 2033 security/selinux/ss/policydb.c for (l = NULL, c = genfs->head; c; l 2034 security/selinux/ss/policydb.c l = c, c = c->next) { l 2050 security/selinux/ss/policydb.c if (l) l 2051 security/selinux/ss/policydb.c l->next = newc; l 2075 security/selinux/ss/policydb.c struct ocontext *l, *c; l 2084 security/selinux/ss/policydb.c l = NULL; l 2090 security/selinux/ss/policydb.c if (l) l 2091 security/selinux/ss/policydb.c l->next = c; l 2094 security/selinux/ss/policydb.c l = c; l 2539 security/selinux/ss/policydb.c static int mls_write_level(struct mls_level *l, void *fp) l 2544 security/selinux/ss/policydb.c buf[0] = cpu_to_le32(l->sens); l 2549 security/selinux/ss/policydb.c rc = ebitmap_write(&l->cat, fp); l 72 security/selinux/ss/sidtab.c u32 l; l 80 security/selinux/ss/sidtab.c for (l = 1; l <= level; ++l) l 81 security/selinux/ss/sidtab.c if (!s->roots[l].ptr_inner) { l 82 security/selinux/ss/sidtab.c s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE, l 84 security/selinux/ss/sidtab.c if (!s->roots[l].ptr_inner) l 86 security/selinux/ss/sidtab.c s->roots[l].ptr_inner->entries[0] = s->roots[l - 1]; l 71 security/smack/smack_lsm.c static int match_opt_prefix(char *s, int l, char **arg) l 77 security/smack/smack_lsm.c if (len > l || memcmp(s, smk_mount_opts[i].name, len)) l 79 security/smack/smack_lsm.c if (len == l || s[len] != '=') l 1923 security/smack/smack_lsm.c struct list_head *l; l 1928 security/smack/smack_lsm.c list_for_each_safe(l, n, &tsp->smk_rules) { l 1929 security/smack/smack_lsm.c rp = list_entry(l, struct smack_rule, list); l 122 sound/aoa/codecs/onyx.c s8 l, r; l 125 sound/aoa/codecs/onyx.c onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l); l 129 sound/aoa/codecs/onyx.c ucontrol->value.integer.value[0] = l + VOLUME_RANGE_SHIFT; l 139 sound/aoa/codecs/onyx.c s8 l, r; l 149 sound/aoa/codecs/onyx.c onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l); l 152 sound/aoa/codecs/onyx.c if (l + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[0] && l 564 sound/aoa/fabrics/layout.c struct layout *l; l 566 sound/aoa/fabrics/layout.c l = layouts; l 567 sound/aoa/fabrics/layout.c while (l->codecs[0].name) { l 568 sound/aoa/fabrics/layout.c if (l->layout_id == id) l 569 sound/aoa/fabrics/layout.c return l; l 570 sound/aoa/fabrics/layout.c l++; l 577 sound/aoa/fabrics/layout.c struct layout *l; l 579 sound/aoa/fabrics/layout.c l = layouts; l 580 sound/aoa/fabrics/layout.c while (l->codecs[0].name) { l 581 sound/aoa/fabrics/layout.c if (l->device_id == id) l 582 sound/aoa/fabrics/layout.c return l; l 583 
sound/aoa/fabrics/layout.c l++; l 588 sound/aoa/fabrics/layout.c static void use_layout(struct layout *l) l 593 sound/aoa/fabrics/layout.c if (l->codecs[i].name) { l 594 sound/aoa/fabrics/layout.c request_module("snd-aoa-codec-%s", l->codecs[i].name); l 1263 sound/core/pcm_lib.c const struct snd_pcm_hw_constraint_list *l) l 1266 sound/core/pcm_lib.c snd_pcm_hw_rule_list, (void *)l, l 1374 sound/core/pcm_lib.c unsigned int l = (unsigned long) rule->private; l 1375 sound/core/pcm_lib.c int width = l & 0xffff; l 1376 sound/core/pcm_lib.c unsigned int msbits = l >> 16; l 1409 sound/core/pcm_lib.c unsigned long l = (msbits << 16) | width; l 1412 sound/core/pcm_lib.c (void*) l, l 21 sound/core/pcm_timer.c unsigned long rate, mult, fsize, l, post; l 28 sound/core/pcm_timer.c l = gcd(mult, rate); l 29 sound/core/pcm_timer.c mult /= l; l 30 sound/core/pcm_timer.c rate /= l; l 34 sound/core/pcm_timer.c l = gcd(rate, fsize); l 35 sound/core/pcm_timer.c rate /= l; l 36 sound/core/pcm_timer.c fsize /= l; l 198 sound/core/seq/oss/seq_oss_event.c if (q->l.chn >= 32) l 200 sound/core/seq/oss/seq_oss_event.c switch (q->l.cmd) { l 202 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_PGMCHANGE, l 203 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.p1, ev); l 206 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_CONTROLLER, l 207 sound/core/seq/oss/seq_oss_event.c q->l.chn, q->l.p1, q->l.val, ev); l 211 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_PITCHBEND, l 212 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.val - 8192, ev); l 215 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_CHANPRESS, l 216 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.val, ev); l 82 sound/core/seq/oss/seq_oss_event.h struct evrec_long l; l 548 sound/core/seq/oss/seq_oss_midi.c ossev.l.cmd = MIDI_CTL_CHANGE; break; l 550 sound/core/seq/oss/seq_oss_midi.c ossev.l.cmd = MIDI_PGM_CHANGE; break; l 552 sound/core/seq/oss/seq_oss_midi.c ossev.l.cmd = MIDI_CHN_PRESSURE; break; l 554 sound/core/seq/oss/seq_oss_midi.c ossev.l.cmd = MIDI_PITCH_BEND; break; l 573 sound/core/seq/oss/seq_oss_midi.c ossev.l.code = EV_CHN_COMMON; l 574 sound/core/seq/oss/seq_oss_midi.c ossev.l.p1 = ev->data.control.param; l 575 sound/core/seq/oss/seq_oss_midi.c ossev.l.val = ev->data.control.value; l 576 sound/core/seq/oss/seq_oss_midi.c ossev.l.chn = ev->data.control.channel; l 579 sound/core/seq/oss/seq_oss_midi.c ossev.l.code = EV_CHN_COMMON; l 580 sound/core/seq/oss/seq_oss_midi.c ossev.l.val = ev->data.control.value + 8192; l 581 sound/core/seq/oss/seq_oss_midi.c ossev.l.chn = ev->data.control.channel; l 37 sound/drivers/vx/vx_mixer.c u32 l; l 41 sound/drivers/vx/vx_mixer.c u16 l; l 51 sound/drivers/vx/vx_mixer.c u16 l; l 66 sound/drivers/vx/vx_mixer.c #define SET_CDC_DATA_INIT(di) ((di).l = 0L, SET_CDC_DATA_SEL(di,XX_CODEC_SELECTOR)) l 81 sound/drivers/vx/vx_mixer.c vx_write_codec_reg(chip, codec, data.l); l 125 sound/hda/ext/hdac_ext_controller.c struct hdac_ext_link *l; l 128 sound/hda/ext/hdac_ext_controller.c l = list_first_entry(&bus->hlink_list, struct hdac_ext_link, list); l 129 sound/hda/ext/hdac_ext_controller.c list_del(&l->list); l 130 sound/hda/ext/hdac_ext_controller.c kfree(l); l 231 sound/isa/gus/gus_main.c int l, idx, local; l 254 sound/isa/gus/gus_main.c for (l = 0, local = gus->gf1.memory; l < 4; l++, local -= 256 * 1024) { l 255 sound/isa/gus/gus_main.c 
gus->gf1.mem_alloc.banks_8[l].address = l 256 sound/isa/gus/gus_main.c gus->gf1.mem_alloc.banks_8[l].size = 0; l 257 sound/isa/gus/gus_main.c gus->gf1.mem_alloc.banks_16[l].address = l << 18; l 258 sound/isa/gus/gus_main.c gus->gf1.mem_alloc.banks_16[l].size = local > 0 ? 256 * 1024 : 0; l 169 sound/isa/msnd/msnd.h #define HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF)) l 170 sound/isa/msnd/msnd.h #define LOWORD(l) ((u16)(u32)(l)) l 736 sound/isa/wavefront/wavefront_synth.c int i, x, l, cnt; l 749 sound/isa/wavefront/wavefront_synth.c for (l = 0; l < WF_NUM_LAYERS; l++) { l 750 sound/isa/wavefront/wavefront_synth.c if (prog.layer[l].mute) { l 752 sound/isa/wavefront/wavefront_synth.c [prog.layer[l].patch_number] |= l 191 sound/mips/hal2.c int l, r; l 197 sound/mips/hal2.c l = 0; l 200 sound/mips/hal2.c l = 31 - ((tmp >> H2I_C2_L_ATT_SHIFT) & 31); l 206 sound/mips/hal2.c l = (tmp >> H2I_C2_L_GAIN_SHIFT) & 15; l 212 sound/mips/hal2.c ucontrol->value.integer.value[0] = l; l 223 sound/mips/hal2.c int l, r; l 225 sound/mips/hal2.c l = ucontrol->value.integer.value[0]; l 232 sound/mips/hal2.c if (l | r) { l 233 sound/mips/hal2.c l = 31 - l; l 235 sound/mips/hal2.c new |= (l << H2I_C2_L_ATT_SHIFT); l 244 sound/mips/hal2.c new |= (l << H2I_C2_L_GAIN_SHIFT); l 396 sound/mips/sgio2audio.c s64 l, r; l 422 sound/mips/sgio2audio.c l = src[0]; /* sign extend */ l 425 sound/mips/sgio2audio.c *dst = ((l & 0x00ffffff) << CHANNEL_LEFT_SHIFT) | l 1017 sound/pci/asihpi/hpi_internal.h struct hpi_gpio_msg l; /* digital i/o */ l 1078 sound/pci/asihpi/hpi_internal.h struct hpi_gpio_res l; /* digital i/o */ l 1186 sound/pci/asihpi/hpi_internal.h struct hpi_gpio_msg l; l 1205 sound/pci/asihpi/hpi_internal.h struct hpi_gpio_res l; l 97 sound/pci/asihpi/hpios.h static inline void cond_lock(struct hpios_spinlock *l) l 103 sound/pci/asihpi/hpios.h spin_lock(&((l)->lock)); l 104 sound/pci/asihpi/hpios.h l->lock_context = IN_LOCK_IRQ; l 106 sound/pci/asihpi/hpios.h spin_lock_bh(&((l)->lock)); l 107 sound/pci/asihpi/hpios.h l->lock_context = IN_LOCK_BH; l 111 sound/pci/asihpi/hpios.h static inline void cond_unlock(struct hpios_spinlock *l) l 113 sound/pci/asihpi/hpios.h if (l->lock_context == IN_LOCK_BH) l 114 sound/pci/asihpi/hpios.h spin_unlock_bh(&((l)->lock)); l 116 sound/pci/asihpi/hpios.h spin_unlock(&((l)->lock)); l 804 sound/pci/au88x0/au88x0_a3d.c int l, r; l 806 sound/pci/au88x0/au88x0_a3d.c l = ucontrol->value.integer.value[0]; l 808 sound/pci/au88x0/au88x0_a3d.c vortex_a3d_coord2ild(a->ild, l, r); l 810 sound/pci/au88x0/au88x0_a3d.c a3dsrc_SetGainTarget(a, l, r); l 811 sound/pci/au88x0/au88x0_a3d.c a3dsrc_SetGainCurrent(a, l, r); l 1498 sound/pci/emu10k1/emufx.c int j, k, l, d; l 1501 sound/pci/emu10k1/emufx.c l = 0xe0 + (z * 8) + (j * 4); l 1512 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(l+1), A_GPR(l), A_GPR(l+1), A_GPR(TREBLE_GPR + 4 + j)); l 1513 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(l), A_GPR(k+2), A_GPR(l), A_GPR(TREBLE_GPR + 2 + j)); l 1514 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(l+3), A_GPR(l+2), A_GPR(l+3), A_GPR(TREBLE_GPR + 8 + j)); l 1515 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMAC0, A_GPR(l+2), A_GPR_ACCU, A_GPR(l+2), A_GPR(TREBLE_GPR + 6 + j)); l 1516 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACINT0, A_GPR(l+2), A_C_00000000, A_GPR(l+2), A_C_00000010); l 1518 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iACC3, A_GPR(d), A_GPR(l+2), A_C_00000000, A_C_00000000); l 2220 sound/pci/emu10k1/emufx.c int j, k, l, d; l 2223 
sound/pci/emu10k1/emufx.c l = 0xd0 + (z * 8) + (j * 4); l 2234 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(l+1), GPR(l), GPR(l+1), GPR(TREBLE_GPR + 4 + j)); l 2235 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(l), GPR(k+2), GPR(l), GPR(TREBLE_GPR + 2 + j)); l 2236 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(l+3), GPR(l+2), GPR(l+3), GPR(TREBLE_GPR + 8 + j)); l 2237 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMAC0, GPR(l+2), GPR_ACCU, GPR(l+2), GPR(TREBLE_GPR + 6 + j)); l 2238 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACINT0, GPR(l+2), C_00000000, GPR(l+2), C_00000010); l 2240 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iACC3, GPR(d), GPR(l+2), C_00000000, C_00000000); l 72 sound/pci/emu10k1/memory.c #define get_emu10k1_memblk(l,member) list_entry(l, struct snd_emu10k1_memblk, member) l 292 sound/pci/korg1212/korg1212.c } l; l 938 sound/pci/korg1212/korg1212.c sensVals.l.v.leftChanId = SET_SENS_LEFTCHANID; l 940 sound/pci/korg1212/korg1212.c sensVals.l.v.leftChanVal = korg1212->leftADCInSens; l 959 sound/pci/korg1212/korg1212.c if (sensVals.l.leftSensBits & (0x0001 << bitPosition)) l 155 sound/pci/ymfpci/ymfpci.c static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, int l, int l2) { return -ENOSYS; } l 923 sound/ppc/pmac.c unsigned int l; l 1068 sound/ppc/pmac.c prop = of_get_property(sound, "sample-rates", &l); l 1070 sound/ppc/pmac.c prop = of_get_property(sound, "output-frame-rates", &l); l 1074 sound/ppc/pmac.c for (l /= sizeof(int); l > 0; --l) { l 545 sound/soc/codecs/sgtl5000.c int l; l 551 sound/soc/codecs/sgtl5000.c l = (reg & SGTL5000_DAC_VOL_LEFT_MASK) >> SGTL5000_DAC_VOL_LEFT_SHIFT; l 557 sound/soc/codecs/sgtl5000.c l = clamp(l, 0x3c, 0xfc); l 561 sound/soc/codecs/sgtl5000.c l = 0xfc - l; l 564 sound/soc/codecs/sgtl5000.c ucontrol->value.integer.value[0] = l; l 598 sound/soc/codecs/sgtl5000.c int l; l 601 sound/soc/codecs/sgtl5000.c l = ucontrol->value.integer.value[0]; l 605 sound/soc/codecs/sgtl5000.c l = clamp(l, 0, 0xfc - 0x3c); l 609 sound/soc/codecs/sgtl5000.c l = 0xfc - l; l 613 sound/soc/codecs/sgtl5000.c reg = l << SGTL5000_DAC_VOL_LEFT_SHIFT | l 27 sound/usb/usx2y/usb_stream.c int l = usb_stream_next_packet_size(sk); l 28 sound/usb/usx2y/usb_stream.c if (s->idle_outsize + lb + l > s->period_size) l 33 sound/usb/usx2y/usb_stream.c urb->iso_frame_desc[pack].length = l; l 34 sound/usb/usx2y/usb_stream.c lb += l; l 280 sound/usb/usx2y/usb_stream.c int p = 0, lb = 0, l = 0; l 289 sound/usb/usx2y/usb_stream.c l = id->actual_length; l 291 sound/usb/usx2y/usb_stream.c od[p].length = l; l 293 sound/usb/usx2y/usb_stream.c lb += l; l 299 sound/usb/usx2y/usb_stream.c l = inurb->iso_frame_desc[s->sync_packet].actual_length; l 301 sound/usb/usx2y/usb_stream.c if (s->idle_outsize + lb + l > s->period_size) l 304 sound/usb/usx2y/usb_stream.c od[p].length = l; l 306 sound/usb/usx2y/usb_stream.c lb += l; l 315 sound/usb/usx2y/usb_stream.c s->idle_outsize + lb + l, l 316 sound/usb/usx2y/usb_stream.c s->idle_outsize, lb, l, l 381 sound/usb/usx2y/usb_stream.c int il, ol, l, p; l 387 sound/usb/usx2y/usb_stream.c l = 0; l 396 sound/usb/usx2y/usb_stream.c for (; p < iu->number_of_packets && l < s->period_size; ++p) { l 399 sound/usb/usx2y/usb_stream.c if (l + il > s->period_size) l 400 sound/usb/usx2y/usb_stream.c il = s->period_size - l; l 413 sound/usb/usx2y/usb_stream.c l += il; l 416 sound/usb/usx2y/usb_stream.c if (l != s->period_size) l 418 sound/usb/usx2y/usb_stream.c l/(int)s->cfg.frame_size); l 439 sound/usb/usx2y/usb_stream.c int 
l, p; l 450 sound/usb/usx2y/usb_stream.c l = id[p].actual_length; l 451 sound/usb/usx2y/usb_stream.c if (unlikely(l == 0 || id[p].status)) { l 463 sound/usb/usx2y/usb_stream.c s->inpacket[s->inpacket_head].length = l; l 464 sound/usb/usx2y/usb_stream.c if (insize + l > s->period_size && l 469 sound/usb/usx2y/usb_stream.c insize += l; l 470 sound/usb/usx2y/usb_stream.c urb_size += l; l 480 sound/usb/usx2y/usb_stream.c l = s->idle_outsize; l 482 sound/usb/usx2y/usb_stream.c sk->write_page) - l; l 487 sound/usb/usx2y/usb_stream.c s->outpacket[0].length = sk->idle_outurb->transfer_buffer_length + l; l 522 sound/usb/usx2y/usb_stream.c int l, p, max_diff, max_diff_0; l 539 sound/usb/usx2y/usb_stream.c l = inurb->iso_frame_desc[p].actual_length; l 540 sound/usb/usx2y/usb_stream.c urb_size += l; l 565 sound/usb/usx2y/usb_stream.c l = s->idle_insize; l 566 sound/usb/usx2y/usb_stream.c while (l > s->inpacket[split].length) { l 567 sound/usb/usx2y/usb_stream.c l -= s->inpacket[split].length; l 575 sound/usb/usx2y/usb_stream.c s->inpacket[split].length - l; l 611 sound/usb/usx2y/usb_stream.c int l = id[p].actual_length; l 612 sound/usb/usx2y/usb_stream.c if (l < s->cfg.frame_size) { l 615 sound/usb/usx2y/usb_stream.c snd_printk(KERN_WARNING "%i\n", l); l 623 sound/usb/usx2y/usb_stream.c s->inpacket[s->inpacket_head].length = l; l 630 sound/usb/usx2y/usb_stream.c int l = urb->iso_frame_desc[pack].actual_length; l 631 sound/usb/usx2y/usb_stream.c printk(KERN_CONT " %i", l); l 136 tools/arch/x86/include/uapi/asm/kvm.h __u8 present, dpl, db, s, l, g, avl; l 18 tools/bpf/bpftool/cfg.c struct list_head l; l 27 tools/bpf/bpftool/cfg.c struct list_head l; l 39 tools/bpf/bpftool/cfg.c struct list_head l; l 48 tools/bpf/bpftool/cfg.c #define func_prev(func) list_prev_entry(func, l) l 49 tools/bpf/bpftool/cfg.c #define func_next(func) list_next_entry(func, l) l 50 tools/bpf/bpftool/cfg.c #define bb_prev(bb) list_prev_entry(bb, l) l 51 tools/bpf/bpftool/cfg.c #define bb_next(bb) list_next_entry(bb, l) l 55 tools/bpf/bpftool/cfg.c list_first_entry(&cfg->funcs, struct func_node, l) l 57 tools/bpf/bpftool/cfg.c list_last_entry(&cfg->funcs, struct func_node, l) l 59 tools/bpf/bpftool/cfg.c list_first_entry(&func->bbs, struct bb_node, l) l 61 tools/bpf/bpftool/cfg.c list_last_entry(&func->bbs, struct bb_node, l) l 67 tools/bpf/bpftool/cfg.c list_for_each_entry(func, &cfg->funcs, l) { l 82 tools/bpf/bpftool/cfg.c list_add(&new_func->l, &func->l); l 93 tools/bpf/bpftool/cfg.c list_for_each_entry(bb, &func->bbs, l) { l 109 tools/bpf/bpftool/cfg.c list_add(&new_bb->l, &bb->l); l 126 tools/bpf/bpftool/cfg.c list_add(&bb->l, after); l 153 tools/bpf/bpftool/cfg.c list_for_each_entry_from(func, &last_func->l, l) { l 207 tools/bpf/bpftool/cfg.c list_for_each_entry_from(bb, &last->l, l) { l 225 tools/bpf/bpftool/cfg.c bb = func_insert_dummy_bb(&func_last_bb(func)->l); l 248 tools/bpf/bpftool/cfg.c list_for_each_entry(bb, &func->bbs, l) { l 287 tools/bpf/bpftool/cfg.c list_add_tail(&e->l, &bb->e_succs); l 293 tools/bpf/bpftool/cfg.c list_add_tail(&e->l, &bb->e_prevs); l 297 tools/bpf/bpftool/cfg.c list_for_each_entry_from(bb, &exit_bb(func)->l, l) { l 308 tools/bpf/bpftool/cfg.c list_add_tail(&e->l, &bb->e_succs); l 314 tools/bpf/bpftool/cfg.c list_add_tail(&e->l, &bb->e_succs); l 320 tools/bpf/bpftool/cfg.c list_add_tail(&e->l, &bb->e_succs); l 327 tools/bpf/bpftool/cfg.c list_add_tail(&e->l, &bb->e_succs); l 343 tools/bpf/bpftool/cfg.c list_for_each_entry(func, &cfg->funcs, l) { l 358 tools/bpf/bpftool/cfg.c 
list_for_each_entry_safe(func, func2, &cfg->funcs, l) { l 361 tools/bpf/bpftool/cfg.c list_for_each_entry_safe(bb, bb2, &func->bbs, l) { l 364 tools/bpf/bpftool/cfg.c list_for_each_entry_safe(e, e2, &bb->e_prevs, l) { l 365 tools/bpf/bpftool/cfg.c list_del(&e->l); l 369 tools/bpf/bpftool/cfg.c list_for_each_entry_safe(e, e2, &bb->e_succs, l) { l 370 tools/bpf/bpftool/cfg.c list_del(&e->l); l 374 tools/bpf/bpftool/cfg.c list_del(&bb->l); l 378 tools/bpf/bpftool/cfg.c list_del(&func->l); l 425 tools/bpf/bpftool/cfg.c list_for_each_entry(e, &bb->e_succs, l) { l 437 tools/bpf/bpftool/cfg.c list_for_each_entry(bb, &func->bbs, l) { l 447 tools/bpf/bpftool/cfg.c list_for_each_entry(bb, &func->bbs, l) { l 463 tools/bpf/bpftool/cfg.c list_for_each_entry(func, &cfg->funcs, l) { l 125 tools/bpf/bpftool/xlated_dumper.c unsigned int l = strlen(fmt); l 126 tools/bpf/bpftool/xlated_dumper.c char chomped_fmt[l]; l 130 tools/bpf/bpftool/xlated_dumper.c if (l > 0) { l 131 tools/bpf/bpftool/xlated_dumper.c strncpy(chomped_fmt, fmt, l - 1); l 132 tools/bpf/bpftool/xlated_dumper.c chomped_fmt[l - 1] = '\0'; l 56 tools/include/linux/bitops.h static inline unsigned fls_long(unsigned long l) l 58 tools/include/linux/bitops.h if (sizeof(l) == 4) l 59 tools/include/linux/bitops.h return fls(l); l 60 tools/include/linux/bitops.h return fls64(l); l 21 tools/include/linux/bits.h #define GENMASK(h, l) \ l 22 tools/include/linux/bits.h (((~UL(0)) - (UL(1) << (l)) + 1) & \ l 25 tools/include/linux/bits.h #define GENMASK_ULL(h, l) \ l 26 tools/include/linux/bits.h (((~ULL(0)) - (ULL(1) << (l)) + 1) & \ l 4908 tools/lib/bpf/libbpf.c struct bpf_link_fd *l = (void *)link; l 4911 tools/lib/bpf/libbpf.c err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0); l 4915 tools/lib/bpf/libbpf.c close(l->fd); l 5213 tools/lib/bpf/libbpf.c struct bpf_link_fd *l = (void *)link; l 5215 tools/lib/bpf/libbpf.c return close(l->fd); l 104 tools/lib/lockdep/preload.c struct lock_lookup *l; l 109 tools/lib/lockdep/preload.c l = rb_entry(*node, struct lock_lookup, node); l 112 tools/lib/lockdep/preload.c if (lock < l->orig) l 113 tools/lib/lockdep/preload.c node = &l->node.rb_left; l 114 tools/lib/lockdep/preload.c else if (lock > l->orig) l 115 tools/lib/lockdep/preload.c node = &l->node.rb_right; l 178 tools/lib/lockdep/preload.c struct lock_lookup *l; l 188 tools/lib/lockdep/preload.c l = alloc_lock(); l 189 tools/lib/lockdep/preload.c if (l == NULL) l 192 tools/lib/lockdep/preload.c l->orig = lock; l 199 tools/lib/lockdep/preload.c sprintf(l->name, "%p", lock); l 200 tools/lib/lockdep/preload.c lockdep_init_map(&l->dep_map, l->name, &l->key, 0); l 205 tools/lib/lockdep/preload.c rb_link_node(&l->node, parent, node); l 206 tools/lib/lockdep/preload.c rb_insert_color(&l->node, &locks); l 209 tools/lib/lockdep/preload.c return l; l 122 tools/lib/subcmd/parse-options.h #define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) } l 124 tools/lib/subcmd/parse-options.h #define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) } l 125 tools/lib/subcmd/parse-options.h #define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h) } l 126 tools/lib/subcmd/parse-options.h #define OPT_BOOLEAN_FLAG(s, l, v, h, f) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = check_vtype(v, bool *), .help = (h), .flags = (f) } l 127 
tools/lib/subcmd/parse-options.h #define OPT_BOOLEAN_SET(s, l, v, os, h) \ l 128 tools/lib/subcmd/parse-options.h { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), \ l 131 tools/lib/subcmd/parse-options.h #define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) } l 132 tools/lib/subcmd/parse-options.h #define OPT_SET_UINT(s, l, v, h, i) { .type = OPTION_SET_UINT, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .defval = (i) } l 133 tools/lib/subcmd/parse-options.h #define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) } l 134 tools/lib/subcmd/parse-options.h #define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) } l 135 tools/lib/subcmd/parse-options.h #define OPT_UINTEGER(s, l, v, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) } l 136 tools/lib/subcmd/parse-options.h #define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) } l 137 tools/lib/subcmd/parse-options.h #define OPT_ULONG(s, l, v, h) { .type = OPTION_ULONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned long *), .help = (h) } l 138 tools/lib/subcmd/parse-options.h #define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) } l 139 tools/lib/subcmd/parse-options.h #define OPT_STRING(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), .argh = (a), .help = (h) } l 140 tools/lib/subcmd/parse-options.h #define OPT_STRING_OPTARG(s, l, v, a, h, d) \ l 141 tools/lib/subcmd/parse-options.h { .type = OPTION_STRING, .short_name = (s), .long_name = (l), \ l 144 tools/lib/subcmd/parse-options.h #define OPT_STRING_OPTARG_SET(s, l, v, os, a, h, d) \ l 145 tools/lib/subcmd/parse-options.h { .type = OPTION_STRING, .short_name = (s), .long_name = (l), \ l 149 tools/lib/subcmd/parse-options.h #define OPT_STRING_NOEMPTY(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), .argh = (a), .help = (h), .flags = PARSE_OPT_NOEMPTY} l 150 tools/lib/subcmd/parse-options.h #define OPT_DATE(s, l, v, h) \ l 151 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb } l 152 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK(s, l, v, a, h, f) \ l 153 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f) } l 154 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \ l 155 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG } l 156 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \ l 157 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT } l 158 
tools/lib/subcmd/parse-options.h #define OPT_CALLBACK_DEFAULT_NOOPT(s, l, v, a, h, f, d) \ l 159 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\ l 162 tools/lib/subcmd/parse-options.h #define OPT_CALLBACK_OPTARG(s, l, v, d, a, h, f) \ l 163 tools/lib/subcmd/parse-options.h { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), \ l 1079 tools/perf/bench/numa.c static void show_summary(double runtime_ns_max, int l, double *convergence) l 1082 tools/perf/bench/numa.c (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0); l 1108 tools/perf/bench/numa.c u32 l; l 1156 tools/perf/bench/numa.c for (l = 0; l < g->p.nr_loops; l++) { l 1162 tools/perf/bench/numa.c val += do_work(global_data, g->p.bytes_global, process_nr, g->p.nr_proc, l, val); l 1163 tools/perf/bench/numa.c val += do_work(process_data, g->p.bytes_process, thread_nr, g->p.nr_threads, l, val); l 1164 tools/perf/bench/numa.c val += do_work(thread_data, g->p.bytes_thread, 0, 1, l, val); l 1176 tools/perf/bench/numa.c val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val); l 1189 tools/perf/bench/numa.c td->loops_done = l; l 1255 tools/perf/bench/numa.c show_summary(runtime_ns_max, l, &convergence); l 547 tools/perf/builtin-c2c.c uint64_t l = 0, r = 0; l 550 tools/perf/builtin-c2c.c l = cl_offset(left->mem_info->daddr.addr); l 554 tools/perf/builtin-c2c.c return (int64_t)(r - l); l 573 tools/perf/builtin-diff.c s64 l, r; l 578 tools/perf/builtin-diff.c l = llabs(left->diff.cycles); l 580 tools/perf/builtin-diff.c return r - l; l 789 tools/perf/builtin-diff.c static int64_t cmp_doubles(double l, double r) l 791 tools/perf/builtin-diff.c if (l > r) l 793 tools/perf/builtin-diff.c else if (l < r) l 806 tools/perf/builtin-diff.c double l = left->diff.period_ratio_delta; l 809 tools/perf/builtin-diff.c return cmp_doubles(l, r); l 813 tools/perf/builtin-diff.c double l = fabs(left->diff.period_ratio_delta); l 816 tools/perf/builtin-diff.c return cmp_doubles(l, r); l 820 tools/perf/builtin-diff.c double l = left->diff.period_ratio; l 823 tools/perf/builtin-diff.c return cmp_doubles(l, r); l 827 tools/perf/builtin-diff.c s64 l = left->diff.wdiff; l 830 tools/perf/builtin-diff.c return r - l; l 1416 tools/perf/builtin-kmem.c struct alloc_stat *l = a; l 1419 tools/perf/builtin-kmem.c if (l->ptr < r->ptr) l 1421 tools/perf/builtin-kmem.c else if (l->ptr > r->ptr) l 1433 tools/perf/builtin-kmem.c struct alloc_stat *l = a; l 1436 tools/perf/builtin-kmem.c if (l->call_site < r->call_site) l 1438 tools/perf/builtin-kmem.c else if (l->call_site > r->call_site) l 1450 tools/perf/builtin-kmem.c struct alloc_stat *l = a; l 1453 tools/perf/builtin-kmem.c if (l->hit < r->hit) l 1455 tools/perf/builtin-kmem.c else if (l->hit > r->hit) l 1467 tools/perf/builtin-kmem.c struct alloc_stat *l = a; l 1470 tools/perf/builtin-kmem.c if (l->bytes_alloc < r->bytes_alloc) l 1472 tools/perf/builtin-kmem.c else if (l->bytes_alloc > r->bytes_alloc) l 1485 tools/perf/builtin-kmem.c struct alloc_stat *l = a; l 1488 tools/perf/builtin-kmem.c x = fragmentation(l->bytes_req, l->bytes_alloc); l 1505 tools/perf/builtin-kmem.c struct alloc_stat *l = a; l 1508 tools/perf/builtin-kmem.c if (l->pingpong < r->pingpong) l 1510 tools/perf/builtin-kmem.c else if (l->pingpong > r->pingpong) l 1523 tools/perf/builtin-kmem.c struct page_stat *l = a; l 1526 tools/perf/builtin-kmem.c if (l->page < r->page) l 1528 tools/perf/builtin-kmem.c else if (l->page > r->page) l 1540 
tools/perf/builtin-kmem.c struct page_stat *l = a; l 1543 tools/perf/builtin-kmem.c if (l->callsite < r->callsite) l 1545 tools/perf/builtin-kmem.c else if (l->callsite > r->callsite) l 1557 tools/perf/builtin-kmem.c struct page_stat *l = a; l 1560 tools/perf/builtin-kmem.c if (l->nr_alloc < r->nr_alloc) l 1562 tools/perf/builtin-kmem.c else if (l->nr_alloc > r->nr_alloc) l 1574 tools/perf/builtin-kmem.c struct page_stat *l = a; l 1577 tools/perf/builtin-kmem.c if (l->alloc_bytes < r->alloc_bytes) l 1579 tools/perf/builtin-kmem.c else if (l->alloc_bytes > r->alloc_bytes) l 1591 tools/perf/builtin-kmem.c struct page_stat *l = a; l 1594 tools/perf/builtin-kmem.c if (l->order < r->order) l 1596 tools/perf/builtin-kmem.c else if (l->order > r->order) l 1608 tools/perf/builtin-kmem.c struct page_stat *l = a; l 1612 tools/perf/builtin-kmem.c if (l->migrate_type == -1U) l 1615 tools/perf/builtin-kmem.c if (l->migrate_type < r->migrate_type) l 1617 tools/perf/builtin-kmem.c else if (l->migrate_type > r->migrate_type) l 1629 tools/perf/builtin-kmem.c struct page_stat *l = a; l 1633 tools/perf/builtin-kmem.c if (l->gfp_flags == -1U) l 1636 tools/perf/builtin-kmem.c if (l->gfp_flags < r->gfp_flags) l 1638 tools/perf/builtin-kmem.c else if (l->gfp_flags > r->gfp_flags) l 591 tools/perf/builtin-probe.c # define set_nobuild(s, l, c) set_option_nobuild(options, s, l, "NO_DWARF=1", c) l 2279 tools/perf/builtin-record.c # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c) l 2293 tools/perf/builtin-record.c # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c) l 943 tools/perf/builtin-sched.c thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) l 951 tools/perf/builtin-sched.c ret = sort->cmp(l, r); l 1353 tools/perf/builtin-sched.c static int pid_cmp(struct work_atoms *l, struct work_atoms *r) l 1355 tools/perf/builtin-sched.c if (l->thread == r->thread) l 1357 tools/perf/builtin-sched.c if (l->thread->tid < r->thread->tid) l 1359 tools/perf/builtin-sched.c if (l->thread->tid > r->thread->tid) l 1361 tools/perf/builtin-sched.c return (int)(l->thread - r->thread); l 1364 tools/perf/builtin-sched.c static int avg_cmp(struct work_atoms *l, struct work_atoms *r) l 1368 tools/perf/builtin-sched.c if (!l->nb_atoms) l 1374 tools/perf/builtin-sched.c avgl = l->total_lat / l->nb_atoms; l 1385 tools/perf/builtin-sched.c static int max_cmp(struct work_atoms *l, struct work_atoms *r) l 1387 tools/perf/builtin-sched.c if (l->max_lat < r->max_lat) l 1389 tools/perf/builtin-sched.c if (l->max_lat > r->max_lat) l 1395 tools/perf/builtin-sched.c static int switch_cmp(struct work_atoms *l, struct work_atoms *r) l 1397 tools/perf/builtin-sched.c if (l->nb_atoms < r->nb_atoms) l 1399 tools/perf/builtin-sched.c if (l->nb_atoms > r->nb_atoms) l 1405 tools/perf/builtin-sched.c static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) l 1407 tools/perf/builtin-sched.c if (l->total_runtime < r->total_runtime) l 1409 tools/perf/builtin-sched.c if (l->total_runtime > r->total_runtime) l 432 tools/perf/builtin-stat.c size_t l; l 516 tools/perf/builtin-stat.c l = strlen(counter->unit); l 517 tools/perf/builtin-stat.c if (l > stat_config.unit_width) l 518 tools/perf/builtin-stat.c stat_config.unit_width = l; l 250 tools/perf/ui/browsers/annotate.c struct annotation_line *l; l 254 tools/perf/ui/browsers/annotate.c l = rb_entry(parent, struct annotation_line, rb_node); l 256 tools/perf/ui/browsers/annotate.c if (disasm__cmp(al, l, 
browser->opts->percent_type) < 0) l 575 tools/perf/util/dso.c struct rlimit l; l 579 tools/perf/util/dso.c if (getrlimit(RLIMIT_NOFILE, &l) == 0) { l 580 tools/perf/util/dso.c if (l.rlim_cur == RLIM_INFINITY) l 581 tools/perf/util/dso.c limit = l.rlim_cur; l 583 tools/perf/util/dso.c limit = l.rlim_cur / 2; l 1729 tools/perf/util/evsel.c struct rlimit l; l 1732 tools/perf/util/evsel.c if (getrlimit(RLIMIT_NOFILE, &l) == 0) { l 1734 tools/perf/util/evsel.c l.rlim_cur = l.rlim_max; l 1736 tools/perf/util/evsel.c l.rlim_cur = l.rlim_max + 1000; l 1737 tools/perf/util/evsel.c l.rlim_max = l.rlim_cur; l 1739 tools/perf/util/evsel.c if (setrlimit(RLIMIT_NOFILE, &l) == 0) { l 129 tools/perf/util/jitdump.h size_t l = strlen(ent->name) + 1; l 130 tools/perf/util/jitdump.h return a + l; l 2116 tools/perf/util/machine.c static int remove_loops(struct branch_entry *l, int nr, l 2127 tools/perf/util/machine.c int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ; l 2132 tools/perf/util/machine.c } else if (l[chash[h]].from == l[i].from) { l 2137 tools/perf/util/machine.c if (l[j].from != l[i + off].from) { l 2145 tools/perf/util/machine.c l + i, off); l 2150 tools/perf/util/machine.c memmove(l + i, l + i + off, l 2151 tools/perf/util/machine.c j * sizeof(*l)); l 118 tools/perf/util/mem-events.c size_t l = 0, i; l 137 tools/perf/util/mem-events.c if (l) { l 139 tools/perf/util/mem-events.c l += 4; l 141 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, tlb_access[i]); l 144 tools/perf/util/mem-events.c l += scnprintf(out, sz - l, "N/A"); l 146 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, " hit"); l 148 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, " miss"); l 150 tools/perf/util/mem-events.c return l; l 180 tools/perf/util/mem-events.c size_t i, l = 0; l 200 tools/perf/util/mem-events.c l += 7; l 209 tools/perf/util/mem-events.c l += 4; l 211 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, mem_lvl[i]); l 218 tools/perf/util/mem-events.c l += 4; l 221 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]); l 223 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, "L%d", lvl); l 226 tools/perf/util/mem-events.c if (l == 0) l 227 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, "N/A"); l 229 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, " hit"); l 231 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, " miss"); l 233 tools/perf/util/mem-events.c return l; l 246 tools/perf/util/mem-events.c size_t i, l = 0; l 258 tools/perf/util/mem-events.c if (l) { l 260 tools/perf/util/mem-events.c l += 4; l 262 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, snoop_access[i]); l 266 tools/perf/util/mem-events.c if (l) { l 268 tools/perf/util/mem-events.c l += 4; l 270 tools/perf/util/mem-events.c l += scnprintf(out + l, sz - l, "Fwd"); l 274 tools/perf/util/mem-events.c l += scnprintf(out, sz - l, "N/A"); l 276 tools/perf/util/mem-events.c return l; l 282 tools/perf/util/mem-events.c int l; l 288 tools/perf/util/mem-events.c l = scnprintf(out, sz, "N/A"); l 290 tools/perf/util/mem-events.c l = scnprintf(out, sz, "Yes"); l 292 tools/perf/util/mem-events.c l = scnprintf(out, sz, "No"); l 294 tools/perf/util/mem-events.c return l; l 888 tools/perf/util/probe-event.c static int __show_one_line(FILE *fp, int l, bool skip, bool show_num) l 901 tools/perf/util/probe-event.c color_fprintf(stdout, color, prefix, l); l 917 tools/perf/util/probe-event.c static int _show_one_line(FILE *fp, int l, bool 
skip, bool show_num) l 919 tools/perf/util/probe-event.c int rv = __show_one_line(fp, l, skip, show_num); l 927 tools/perf/util/probe-event.c #define show_one_line_with_num(f,l) _show_one_line(f,l,false,true) l 928 tools/perf/util/probe-event.c #define show_one_line(f,l) _show_one_line(f,l,false,false) l 929 tools/perf/util/probe-event.c #define skip_one_line(f,l) _show_one_line(f,l,true,false) l 930 tools/perf/util/probe-event.c #define show_one_line_or_eof(f,l) __show_one_line(f,l,false,false) l 939 tools/perf/util/probe-event.c int l = 1; l 995 tools/perf/util/probe-event.c while (l < lr->start) { l 996 tools/perf/util/probe-event.c ret = skip_one_line(fp, l++); l 1002 tools/perf/util/probe-event.c for (; ln->i > l; l++) { l 1003 tools/perf/util/probe-event.c ret = show_one_line(fp, l - lr->offset); l 1007 tools/perf/util/probe-event.c ret = show_one_line_with_num(fp, l++ - lr->offset); l 1013 tools/perf/util/probe-event.c lr->end = l + NR_ADDITIONAL_LINES; l 1014 tools/perf/util/probe-event.c while (l <= lr->end) { l 1015 tools/perf/util/probe-event.c ret = show_one_line_or_eof(fp, l++ - lr->offset); l 79 tools/perf/util/sort.c static int64_t cmp_null(const void *l, const void *r) l 81 tools/perf/util/sort.c if (!l && !r) l 83 tools/perf/util/sort.c else if (!l) l 1006 tools/perf/util/sort.c uint64_t l = 0, r = 0; l 1009 tools/perf/util/sort.c l = left->mem_info->daddr.addr; l 1013 tools/perf/util/sort.c return (int64_t)(r - l); l 1035 tools/perf/util/sort.c uint64_t l = 0, r = 0; l 1038 tools/perf/util/sort.c l = left->mem_info->iaddr.addr; l 1042 tools/perf/util/sort.c return (int64_t)(r - l); l 1201 tools/perf/util/sort.c u64 l, r; l 1253 tools/perf/util/sort.c l = cl_address(left->mem_info->daddr.al_addr); l 1256 tools/perf/util/sort.c if (l > r) return -1; l 1257 tools/perf/util/sort.c if (l < r) return 1; l 1399 tools/perf/util/sort.c uint64_t l = 0, r = 0; l 1402 tools/perf/util/sort.c l = left->mem_info->daddr.phys_addr; l 1406 tools/perf/util/sort.c return (int64_t)(r - l); l 67 tools/perf/util/srccode.c int l; l 73 tools/perf/util/srccode.c l = 0; l 74 tools/perf/util/srccode.c lines[l++] = map; l 76 tools/perf/util/srccode.c if (l >= maxline) l 78 tools/perf/util/srccode.c lines[l++] = ++p; l 81 tools/perf/util/srccode.c lines[l] = p; l 166 tools/perf/util/srccode.c char *l, *p; l 173 tools/perf/util/srccode.c l = sf->lines[line]; l 174 tools/perf/util/srccode.c if (!l) l 176 tools/perf/util/srccode.c p = memchr(l, '\n', sf->map + sf->maplen - l); l 177 tools/perf/util/srccode.c *lenp = p - l; l 178 tools/perf/util/srccode.c return l; l 24 tools/perf/util/strfilter.c strfilter_node__delete(node->l); l 67 tools/perf/util/strfilter.c struct strfilter_node *l, l 74 tools/perf/util/strfilter.c node->l = l; l 226 tools/perf/util/strfilter.c return strfilter_node__compare(node->l, str) || l 229 tools/perf/util/strfilter.c return strfilter_node__compare(node->l, str) && l 274 tools/perf/util/strfilter.c len = strfilter_node__sprint_pt(node->l, buf); l 11 tools/perf/util/strfilter.h struct strfilter_node *l; /* Tree left branche (for &,|) */ l 32 tools/power/x86/intel-speed-select/isst.h #define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h)))) l 33 tools/power/x86/intel-speed-select/isst.h #define GENMASK_ULL(h, l) \ l 34 tools/power/x86/intel-speed-select/isst.h (((~0ULL) << (l)) & (~0ULL >> (sizeof(long long) * 8 - 1 - (h)))) l 20 tools/testing/radix-tree/benchmark.c int l, loops = 1; l 27 tools/testing/radix-tree/benchmark.c for (l = 0; l < loops; l++) { l 
52 tools/testing/selftests/bpf/progs/btf_dump_test_case_ordering.c struct list_head l; l 111 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c fn_ptr_arr2_t l; l 703 tools/testing/selftests/bpf/test_align.c unsigned int l = atoi(argv[argc - 2]); l 706 tools/testing/selftests/bpf/test_align.c if (l < to && u < to) { l 707 tools/testing/selftests/bpf/test_align.c from = l; l 1200 tools/testing/selftests/bpf/test_sockmap.c int err, i, l, r; l 1213 tools/testing/selftests/bpf/test_sockmap.c for (l = 1; l < 100; l += 33) { l 1216 tools/testing/selftests/bpf/test_sockmap.c opt.iov_length = l; l 1148 tools/testing/selftests/bpf/test_verifier.c unsigned int l = atoi(argv[arg]); l 1151 tools/testing/selftests/bpf/test_verifier.c if (l < to && u < to) { l 1152 tools/testing/selftests/bpf/test_verifier.c from = l; l 61 tools/testing/selftests/kvm/include/x86_64/processor.h unsigned limit1:4, avl:1, l:1, db:1, g:1, base2:8; l 142 tools/testing/selftests/kvm/lib/x86_64/processor.c segment->db, segment->s, segment->l); l 454 tools/testing/selftests/kvm/lib/x86_64/processor.c desc->l = segp->l; l 488 tools/testing/selftests/kvm/lib/x86_64/processor.c segp->l = true; l 239 tools/testing/selftests/memfd/memfd_test.c ssize_t l; l 241 tools/testing/selftests/memfd/memfd_test.c l = read(fd, buf, sizeof(buf)); l 242 tools/testing/selftests/memfd/memfd_test.c if (l != sizeof(buf)) { l 295 tools/testing/selftests/memfd/memfd_test.c ssize_t l; l 305 tools/testing/selftests/memfd/memfd_test.c l = write(fd, "\0\0\0\0", 4); l 306 tools/testing/selftests/memfd/memfd_test.c if (l != 4) { l 375 tools/testing/selftests/memfd/memfd_test.c ssize_t l; l 380 tools/testing/selftests/memfd/memfd_test.c l = write(fd, "data", 4); l 381 tools/testing/selftests/memfd/memfd_test.c if (l != -EPERM) { l 382 tools/testing/selftests/memfd/memfd_test.c printf("expected EPERM on write(), but got %d: %m\n", (int)l); l 519 tools/testing/selftests/memfd/memfd_test.c ssize_t l; l 531 tools/testing/selftests/memfd/memfd_test.c l = pwrite(fd, buf, mfd_def_size * 8, 0); l 532 tools/testing/selftests/memfd/memfd_test.c if (l != (mfd_def_size * 8)) { l 543 tools/testing/selftests/memfd/memfd_test.c ssize_t l; l 555 tools/testing/selftests/memfd/memfd_test.c l = pwrite(fd, buf, mfd_def_size * 8, 0); l 556 tools/testing/selftests/memfd/memfd_test.c if (l == (mfd_def_size * 8)) { l 13 tools/testing/selftests/powerpc/include/basic_asm.h ori reg, reg, (expr)@l; l 212 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h addi r2,r2,(.TOC.-0b)@l; \ l 312 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h addi reg,reg,(name - 0b)@l; l 324 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h ori \r, \r, (\x)@l l 327 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h li \r, (\x)@l l 339 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h ori \r, \r, (\x)@l l 354 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h ori reg, reg, (expr)@l; \ l 372 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h addi reg,reg,(expr)@l; l 377 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h #define ADDROFF(name) name@l l 830 tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h ori reg,reg,BUCSR_INIT@l; \ l 127 tools/testing/selftests/powerpc/vphn/asm/lppaca.h static inline bool lppaca_shared_proc(struct lppaca *l) l 131 tools/testing/selftests/powerpc/vphn/asm/lppaca.h return !!(l->__old_status & LPPACA_OLD_SHARED_PROC); l 160 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 162 
tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 163 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 174 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 227 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 229 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 230 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 241 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 299 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 306 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 353 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 355 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 356 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 367 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 428 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 430 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 431 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 442 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 504 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 506 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 507 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 508 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) l 523 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 588 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 590 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 591 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 712 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 714 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 715 tools/testing/selftests/rseq/rseq-arm.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 214 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) l 216 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) l 217 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) l 222 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 225 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 226 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 268 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) l 270 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) l 271 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) l 276 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail]) l 279 tools/testing/selftests/rseq/rseq-arm64.h 
RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 280 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2]) l 325 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) l 331 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 370 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) l 372 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) l 373 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) l 378 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 381 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 382 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 429 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) l 431 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) l 432 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) l 437 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 440 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 441 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 488 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) l 490 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) l 491 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) l 492 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error3]) l 497 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 499 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail]) l 502 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 503 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 504 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3]) l 551 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) l 553 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) l 554 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) l 559 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 562 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 563 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 611 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) l 613 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) l 614 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) l 619 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 622 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 623 tools/testing/selftests/rseq/rseq-arm64.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 167 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 169 tools/testing/selftests/rseq/rseq-mips.h 
RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 170 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 180 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 232 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 234 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 235 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 245 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 302 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 309 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 356 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 358 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 359 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 369 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 429 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 431 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 432 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 442 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 503 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 505 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 506 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 507 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) l 520 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 583 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 585 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 586 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 704 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 706 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 707 tools/testing/selftests/rseq/rseq-mips.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 213 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 215 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 216 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 224 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 228 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 230 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 273 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 275 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 276 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 284 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail]) l 288 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 
%l[error1]) l 290 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2]) l 341 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 350 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 394 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 396 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 397 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 405 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 409 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 411 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 462 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 464 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 465 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 473 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 477 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 479 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 532 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 534 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 535 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 536 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) l 544 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 547 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail]) l 551 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 553 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 555 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3]) l 605 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 607 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 608 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 620 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 624 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 626 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 679 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 681 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 682 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 694 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) l 698 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 700 tools/testing/selftests/rseq/rseq-ppc.h RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) l 144 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 146 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 147 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, 
%l[error2]) l 157 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 207 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 209 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 210 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 221 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 273 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 280 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 323 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 325 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 326 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 336 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 399 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 401 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 402 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 403 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) l 416 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) l 473 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 475 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 476 tools/testing/selftests/rseq/rseq-s390.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 120 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 122 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 123 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 133 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 181 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 183 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 184 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 195 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 245 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 252 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 291 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 293 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 294 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 304 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 365 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 367 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 368 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 369 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) l 382 
tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 437 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 439 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 440 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 641 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 643 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 644 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 654 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 702 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 704 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 705 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 716 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 766 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 773 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 812 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 814 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 815 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 825 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 878 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 880 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 881 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 892 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 947 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 949 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 950 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 951 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) l 964 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_CPU_ID_OFFSET(%[rseq_abi]), %l[error1]) l 1021 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 1023 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 1024 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 1133 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) l 1135 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) l 1136 tools/testing/selftests/rseq/rseq-x86.h RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) l 55 tools/testing/selftests/vm/thuge-gen.c int l = 0; l 56 tools/testing/selftests/vm/thuge-gen.c while ((1UL << l) < v) l 57 tools/testing/selftests/vm/thuge-gen.c l++; l 58 tools/testing/selftests/vm/thuge-gen.c return l; l 352 tools/testing/selftests/x86/sigreturn.c bool l = (ar & (1 << 21)); l 357 tools/testing/selftests/x86/sigreturn.c if (l && !db) l 359 
tools/testing/selftests/x86/sigreturn.c else if (!l && db) l 361 tools/testing/selftests/x86/sigreturn.c else if (!l && !db) l 221 tools/vm/slabinfo.c size_t l; l 227 tools/vm/slabinfo.c l = 0; l 229 tools/vm/slabinfo.c l = fread(buffer, 1, sizeof(buffer), f); l 230 tools/vm/slabinfo.c buffer[l] = 0; l 233 tools/vm/slabinfo.c return l;
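The listing above includes two userspace copies of GENMASK(h, l), in tools/include/linux/bits.h and tools/power/x86/intel-speed-select/isst.h. Both build a mask with bits l through h set, inclusive. A minimal standalone check, written against the isst.h shape (the bits.h copy is equivalent but goes through the UL()/ULL() helpers); the main() driver is illustrative only and not from the tree:

    #include <stdio.h>

    /* Bits l..h set, inclusive; same shape as the isst.h copy listed above. */
    #define GENMASK(h, l) \
            (((~0UL) << (l)) & (~0UL >> (sizeof(long) * 8 - 1 - (h))))

    int main(void)
    {
            printf("%#lx\n", GENMASK(7, 4));   /* 0xf0 */
            printf("%#lx\n", GENMASK(31, 0));  /* low 32 bits set */
            return 0;
    }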
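sound/isa/msnd/msnd.h, also listed above, defines HIWORD()/LOWORD() to split a 32-bit value into its 16-bit halves. A self-contained sketch of the same macros using standard fixed-width types instead of the kernel's u16/u32 (the test value and main() are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Upper and lower 16 bits of a 32-bit value, as in msnd.h. */
    #define HIWORD(l) ((uint16_t)((((uint32_t)(l)) >> 16) & 0xFFFF))
    #define LOWORD(l) ((uint16_t)(uint32_t)(l))

    int main(void)
    {
            uint32_t v = 0x12345678;

            printf("hi=%#x lo=%#x\n", HIWORD(v), LOWORD(v)); /* hi=0x1234 lo=0x5678 */
            return 0;
    }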
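Several tools/perf/util/mem-events.c entries above use the accumulation idiom l += scnprintf(out + l, sz - l, ...). scnprintf() returns the number of characters actually stored, so the running offset l can never step past the end of the buffer. A standalone sketch of that idiom; my_scnprintf() here is a local stand-in with the same return convention, not the perf helper itself:

    #include <stdarg.h>
    #include <stdio.h>

    /* Like snprintf(), but return how many characters were actually stored
     * (excluding the NUL), so "l += my_scnprintf(out + l, sz - l, ...)"
     * never advances l beyond sz. */
    static size_t my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
            va_list args;
            int i;

            if (size == 0)
                    return 0;
            va_start(args, fmt);
            i = vsnprintf(buf, size, fmt, args);
            va_end(args);
            if (i < 0)
                    return 0;
            return (size_t)i < size ? (size_t)i : size - 1;
    }

    int main(void)
    {
            char out[16];
            size_t sz = sizeof(out), l = 0;

            l += my_scnprintf(out + l, sz - l, "L1");
            l += my_scnprintf(out + l, sz - l, " or ");
            l += my_scnprintf(out + l, sz - l, "L2 hit");
            printf("%s (len %zu)\n", out, l); /* "L1 or L2 hit (len 12)" */
            return 0;
    }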
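The tools/perf/util/dso.c and tools/perf/util/evsel.c entries above read and bump RLIMIT_NOFILE when the tool runs low on file descriptors. The same getrlimit()/setrlimit() pattern in isolation, raising the soft limit only up to the existing hard limit so no extra privileges are needed; error handling here is a sketch, not perf's:

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit l;

            if (getrlimit(RLIMIT_NOFILE, &l) != 0) {
                    perror("getrlimit");
                    return 1;
            }
            printf("soft=%llu hard=%llu\n",
                   (unsigned long long)l.rlim_cur,
                   (unsigned long long)l.rlim_max);

            l.rlim_cur = l.rlim_max;    /* raise soft limit to the hard cap */
            if (setrlimit(RLIMIT_NOFILE, &l) != 0)
                    perror("setrlimit");
            return 0;
    }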
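tools/testing/selftests/vm/thuge-gen.c, listed above, sizes mappings with a small shift loop: find the smallest l such that (1UL << l) >= v, i.e. a rounded-up log2 for v >= 1. Extracted as a standalone helper; the function name ilog2_up and the main() driver are mine, only the loop body matches the selftest:

    #include <stdio.h>

    /* Smallest l with (1UL << l) >= v; returns 0 for v <= 1. */
    static int ilog2_up(unsigned long v)
    {
            int l = 0;

            while ((1UL << l) < v)
                    l++;
            return l;
    }

    int main(void)
    {
            printf("%d %d %d\n", ilog2_up(1), ilog2_up(2048), ilog2_up(3000)); /* 0 11 12 */
            return 0;
    }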