lend               61 arch/xtensa/include/asm/ptrace.h 	unsigned long lend;		/*  36 */
lend               47 arch/xtensa/include/uapi/asm/ptrace.h 	__u32 lend;
lend               40 arch/xtensa/kernel/asm-offsets.c 	DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
lend               49 arch/xtensa/kernel/ptrace.c 		.lend = regs->lend,
lend               89 arch/xtensa/kernel/ptrace.c 	regs->lend = newregs.lend;
lend              315 arch/xtensa/kernel/ptrace.c 		tmp = regs->lend;
lend              144 arch/xtensa/kernel/signal.c 	COPY(lend);
lend              183 arch/xtensa/kernel/signal.c 	COPY(lend);
lend              206 arch/xtensa/kernel/signal.c 	    && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
lend              473 arch/xtensa/kernel/traps.c 		regs->lbeg, regs->lend, regs->lcount, regs->sar);
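The xtensa hits above all concern the LEND loop-end register of the core's zero-overhead loop option: struct pt_regs stores it, asm-offsets.c exports PT_LEND for the assembly entry code, and the ptrace/signal paths copy it between kernel and user frames. The check at signal.c line 206 exists because the loop registers arrive from a user-writable sigcontext; a minimal sketch of that validation, using a hypothetical helper name and one plausible response (disarming the loop via LCOUNT):

	/* Hypothetical helper: LBEG/LEND come from a user-supplied signal
	 * frame, so refuse loop bounds that point outside user space and
	 * disarm the loop by clearing LCOUNT. */
	static void sanitize_loop_regs(struct pt_regs *regs)
	{
		if (regs->lcount &&
		    (regs->lbeg > TASK_SIZE || regs->lend > TASK_SIZE))
			regs->lcount = 0;
	}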
lend              386 drivers/md/dm-cache-policy-smq.c static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
lend              390 drivers/md/dm-cache-policy-smq.c 	BUG_ON(lbegin > lend);
lend              391 drivers/md/dm-cache-policy-smq.c 	BUG_ON(lend > q->nr_levels);
lend              392 drivers/md/dm-cache-policy-smq.c 	nr_levels = lend - lbegin;
lend              396 drivers/md/dm-cache-policy-smq.c 	for (level = lbegin; level < lend; level++)
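In dm-cache's smq policy, q_set_targets_subrange_() spreads nr_elts queue targets over the half-open level range [lbegin, lend); the two BUG_ONs pin that convention down (lbegin <= lend <= nr_levels). A self-contained sketch of such an even split, not the dm-smq internals:

	/* Sketch: distribute nr_elts evenly over levels [lbegin, lend),
	 * handing the remainder to the lowest levels. The range is
	 * half-open, so lbegin == lend is a valid empty range. */
	static void set_targets(unsigned int *target, unsigned int nr_elts,
				unsigned int lbegin, unsigned int lend)
	{
		unsigned int level, nr_levels = lend - lbegin;

		if (!nr_levels)
			return;

		for (level = lbegin; level < lend; level++)
			target[level] = nr_elts / nr_levels +
					(level - lbegin < nr_elts % nr_levels);
	}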
lend             2637 fs/ext4/ext4.h 			     loff_t lstart, loff_t lend);
lend             1793 fs/gfs2/bmap.c 		u64 lend;
lend             1802 fs/gfs2/bmap.c 		lend = end_offset >> bsize_shift;
lend             1804 fs/gfs2/bmap.c 		if (lblock >= lend)
lend             1807 fs/gfs2/bmap.c 		find_metapath(sdp, lend, &mp, ip->i_height);
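In the gfs2 punch_hole() hits, lend is exclusive: end_offset is the first byte past the hole, so after the block-size shift, lend names the first block to keep, and lblock >= lend means no whole block lies inside the hole. A worked sketch of that range math (the real code zeroes partially covered edge blocks separately; rounding lblock up to the first fully covered block is an assumption made here for self-containment):

	/* Sketch: byte range -> half-open block range, 4 KiB blocks
	 * (bsize_shift = 12). offset = 100, length = 12188 gives
	 * end_offset = 12288, lblock = 1, lend = 3: free blocks 1 and 2. */
	u64 end_offset = offset + length;
	u64 lblock = (offset + (1ULL << bsize_shift) - 1) >> bsize_shift;
	u64 lend = end_offset >> bsize_shift;

	if (lblock >= lend)
		return 0;	/* hole does not cover any whole block */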
lend              415 fs/hugetlbfs/inode.c 				   loff_t lend)
lend              420 fs/hugetlbfs/inode.c 	const pgoff_t end = lend >> huge_page_shift(h);
lend              425 fs/hugetlbfs/inode.c 	bool truncate_op = (lend == LLONG_MAX);
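hugetlbfs's remove_inode_hugepages() serves both truncation and hole punching with one loop: byte offsets become huge-page indices, and lend == LLONG_MAX marks the truncate case (remove from lstart to EOF) rather than a bounded punch. A worked conversion, assuming 2 MiB huge pages:

	/* Worked example, huge_page_shift(h) == 21 (2 MiB pages):
	 * lstart = 0, lend = 8 MiB -> indices [0, 4) are removed;
	 * lend == LLONG_MAX        -> truncate_op, remove to EOF. */
	const pgoff_t start = lstart >> huge_page_shift(h);	/* 0 */
	const pgoff_t end = lend >> huge_page_shift(h);		/* 4 */
	bool truncate_op = (lend == LLONG_MAX);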
lend              515 fs/nfs/internal.h 		loff_t lstart, loff_t lend);
lend             2008 fs/nfs/write.c 		loff_t lstart, loff_t lend)
lend             2012 fs/nfs/write.c 	ret = filemap_write_and_wait_range(mapping, lstart, lend);
lend             2738 include/linux/fs.h 				   loff_t lend);
lend             2748 include/linux/fs.h 				  loff_t lend);
lend             2751 include/linux/fs.h 				        loff_t lstart, loff_t lend);
lend             2760 include/linux/fs.h 						loff_t lend);
lend             2417 include/linux/mm.h 				       loff_t lstart, loff_t lend);
lend              676 mm/filemap.c   				 loff_t lstart, loff_t lend)
lend              681 mm/filemap.c   		err = __filemap_fdatawrite_range(mapping, lstart, lend,
lend              686 mm/filemap.c   						lstart, lend);
lend              776 mm/filemap.c   int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
lend              782 mm/filemap.c   		err = __filemap_fdatawrite_range(mapping, lstart, lend,
lend              786 mm/filemap.c   			__filemap_fdatawait_range(mapping, lstart, lend);
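filemap_write_and_wait_range() and file_write_and_wait_range() take lstart/lend as inclusive byte offsets; the fs/nfs/write.c hit above is a typical thin wrapper. A usage sketch for the two common shapes, syncing one I/O range or the whole file:

	/* Usage sketch: lend is inclusive, so a write of `count` bytes
	 * at `pos` is the range [pos, pos + count - 1] ... */
	int err = filemap_write_and_wait_range(file->f_mapping,
					       pos, pos + count - 1);

	/* ... and the whole file is [0, LLONG_MAX]. */
	err = filemap_write_and_wait_range(file->f_mapping, 0, LLONG_MAX);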
lend              795 mm/shmem.c     static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
lend              801 mm/shmem.c     	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
lend              803 mm/shmem.c     	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
lend              810 mm/shmem.c     	if (lend == -1)
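shmem_undo_range() turns the inclusive byte range into page indices: end = (lend + 1) >> PAGE_SHIFT is exclusive, and partial_end is how many bytes of the final page the range covers. Because lend is a signed loff_t, the whole-file sentinel lend == -1 would make lend + 1 wrap to 0, hence the explicit fixup at line 810. Worked numbers with 4 KiB pages:

	Worked example, PAGE_SIZE = 4096:
	  lend = 8191 -> end = (8191 + 1) >> 12 = 2, partial_end = 8192 & 4095 = 0
	                 (the range ends exactly on a page boundary)
	  lend = 9000 -> end = 9001 >> 12 = 2, partial_end = 9001 & 4095 = 809
	                 (page 2 is only partially covered)
	  lend = -1   -> lend + 1 == 0, so the fixup forces end = -1,
	                 i.e. every page from start onward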
lend              990 mm/shmem.c     void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
lend              992 mm/shmem.c     	shmem_undo_range(inode, lstart, lend, false);
lend             4079 mm/shmem.c     void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
lend             4081 mm/shmem.c     	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
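The two shmem_truncate_range() definitions are the CONFIG_SHMEM and !CONFIG_SHMEM builds of one interface; the fallback at line 4079 simply delegates to truncate_inode_pages_range(). In both, lend is an inclusive byte offset with -1 meaning "through EOF", so dropping every page of an inode is the usual idiom:

	/* Usage sketch: evict all pages of a shmem inode; (loff_t)-1 is
	 * the inclusive end-of-file sentinel. */
	shmem_truncate_range(inode, 0, (loff_t)-1);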
lend              292 mm/truncate.c  				loff_t lstart, loff_t lend)
lend              308 mm/truncate.c  	partial_end = (lend + 1) & (PAGE_SIZE - 1);
lend              317 mm/truncate.c  	if (lend == -1)
lend              325 mm/truncate.c  		end = (lend + 1) >> PAGE_SHIFT;
lend              910 mm/truncate.c  void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
lend              914 mm/truncate.c  	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
lend              931 mm/truncate.c  	truncate_inode_pages_range(mapping, lstart, lend);
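truncate_pagecache_range() is the hole-punching helper: lstart and lend are both inclusive byte offsets, and the round_down(1 + lend, PAGE_SIZE) - 1 step at line 914 contracts the unmap range inward to whole pages before truncate_inode_pages_range() handles any partially covered edge pages. Callers pass the first and last byte of the hole:

	/* Usage sketch: drop the page cache for a hole of `len` bytes at
	 * `offset`; the last byte of the hole is offset + len - 1. */
	truncate_pagecache_range(inode, offset, offset + len - 1);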