task_size          26 arch/mips/include/asm/hugetlb.h 	unsigned long task_size = STACK_TOP;
task_size          33 arch/mips/include/asm/hugetlb.h 	if (len > task_size)
task_size          35 arch/mips/include/asm/hugetlb.h 	if (task_size - len < addr)
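
The MIPS hugetlb entries above use a recurring validation idiom: take STACK_TOP as the upper bound of the user address space, reject a length that exceeds it, and reject a hint address whose end would run past it; writing the last test as task_size - len < addr avoids overflowing addr + len. A minimal userspace sketch of that idiom follows (the function name and the 2 GiB bound are placeholders, not the kernel's definitions):

    /* Illustrative sketch of the range check in the MIPS hugetlb lines above;
     * the function name and the bound are placeholders, not kernel values. */
    #include <stdio.h>

    #define FAKE_STACK_TOP (1UL << 31)      /* stand-in for STACK_TOP */

    static int check_huge_range(unsigned long addr, unsigned long len)
    {
        unsigned long task_size = FAKE_STACK_TOP;

        if (len > task_size)                /* request larger than the whole space */
            return -1;
        if (task_size - len < addr)         /* addr + len would run past the top */
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_huge_range(0x1000, 0x2000));               /* 0: fits */
        printf("%d\n", check_huge_range(FAKE_STACK_TOP - 1, 4096));     /* -1: past the top */
        return 0;
    }
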
task_size         240 arch/parisc/include/asm/elf.h 	current->thread.task_size = DEFAULT_TASK_SIZE; \
task_size         250 arch/parisc/include/asm/elf.h 		current->thread.task_size = DEFAULT_TASK_SIZE32; \
task_size          25 arch/parisc/include/asm/processor.h #define TASK_SIZE_OF(tsk)       ((tsk)->thread.task_size)
task_size         116 arch/parisc/include/asm/processor.h 	unsigned long  task_size;
task_size         153 arch/parisc/include/asm/processor.h 	.task_size	= DEFAULT_TASK_SIZE, \
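
In the parisc entries the limit is not a global constant but a per-task value: the elf.h lines (part of the personality-setup macro, judging by the trailing backslashes) store DEFAULT_TASK_SIZE or DEFAULT_TASK_SIZE32 into current->thread.task_size, and TASK_SIZE_OF() in processor.h reads it back, so 32-bit and 64-bit tasks on the same kernel see different limits. A hedged sketch of that per-task selection, with invented struct layout and size constants:

    /* Sketch of the per-task limit seen in the parisc lines above; the struct
     * layout and both size constants are invented for illustration. */
    #include <stdio.h>
    #include <stdbool.h>

    #define FAKE_TASK_SIZE_64 (1UL << 40)
    #define FAKE_TASK_SIZE_32 (1UL << 32)

    struct fake_thread { unsigned long task_size; };
    struct fake_task   { struct fake_thread thread; };

    #define FAKE_TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)

    /* roughly what the personality-setup step does for 32- vs 64-bit binaries */
    static void fake_set_personality(struct fake_task *tsk, bool is_32bit)
    {
        tsk->thread.task_size = is_32bit ? FAKE_TASK_SIZE_32 : FAKE_TASK_SIZE_64;
    }

    int main(void)
    {
        struct fake_task wide = {{ 0 }}, compat = {{ 0 }};

        fake_set_personality(&wide, false);
        fake_set_personality(&compat, true);
        printf("%#lx %#lx\n", FAKE_TASK_SIZE_OF(&wide), FAKE_TASK_SIZE_OF(&compat));
        return 0;
    }
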
task_size          87 arch/parisc/kernel/sys_parisc.c 	unsigned long task_size = TASK_SIZE;
task_size          91 arch/parisc/kernel/sys_parisc.c 	if (len > task_size)
task_size         114 arch/parisc/kernel/sys_parisc.c 		if (task_size - len >= addr &&
task_size         204 arch/powerpc/mm/book3s64/subpage_prot.c 	    addr >= mm->task_size || len >= mm->task_size ||
task_size         205 arch/powerpc/mm/book3s64/subpage_prot.c 	    addr + len > mm->task_size)
task_size         471 arch/powerpc/mm/slice.c 	BUG_ON(mm->task_size == 0);
task_size        1027 arch/s390/kernel/setup.c 	int task_size = sizeof(struct task_struct);
task_size        1030 arch/s390/kernel/setup.c 		task_size -= sizeof(__vector128) * __NUM_VXRS;
task_size        1031 arch/s390/kernel/setup.c 		task_size += sizeof(freg_t) * __NUM_FPRS;
task_size        1033 arch/s390/kernel/setup.c 	arch_task_struct_size = task_size;
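
The s390 setup.c entries reuse the identifier for something different: here task_size is the byte size of struct task_struct, adjusted under a condition not shown by swapping a vector-register save area for plain FP registers, and then published as arch_task_struct_size (the x86 fpu/init.c entries further down follow the same pattern for the FPU state). A small userspace sketch of that size arithmetic, with made-up types standing in for the kernel's:

    /* Sketch of the "shrink or grow sizeof(struct task_struct)" arithmetic in the
     * s390 setup.c lines; every type and flag below is a stand-in, not a kernel one. */
    #include <stdio.h>

    struct fake_vector_reg { unsigned long long v[2]; };    /* 16-byte vector register */
    struct fake_fp_reg     { unsigned long long f; };       /* 8-byte FP register */
    #define NUM_REGS 16

    struct fake_task {
        long state;
        struct fake_vector_reg vxrs[NUM_REGS];              /* largest possible save area */
    };

    int main(void)
    {
        int has_vector = 0;                 /* pretend the vector facility is absent */
        int task_size = sizeof(struct fake_task);

        if (!has_vector) {
            /* drop the vector save area and account for plain FP registers instead */
            task_size -= sizeof(struct fake_vector_reg) * NUM_REGS;
            task_size += sizeof(struct fake_fp_reg) * NUM_REGS;
        }
        printf("adjusted task struct size: %d bytes\n", task_size);
        return 0;
    }
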
task_size          94 arch/sparc/kernel/sys_sparc_64.c 	unsigned long task_size = TASK_SIZE;
task_size         109 arch/sparc/kernel/sys_sparc_64.c 		task_size = STACK_TOP32;
task_size         110 arch/sparc/kernel/sys_sparc_64.c 	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
task_size         124 arch/sparc/kernel/sys_sparc_64.c 		if (task_size - len >= addr &&
task_size         132 arch/sparc/kernel/sys_sparc_64.c 	info.high_limit = min(task_size, VA_EXCLUDE_START);
task_size         137 arch/sparc/kernel/sys_sparc_64.c 	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
task_size         140 arch/sparc/kernel/sys_sparc_64.c 		info.high_limit = task_size;
task_size         154 arch/sparc/kernel/sys_sparc_64.c 	unsigned long task_size = STACK_TOP32;
task_size         172 arch/sparc/kernel/sys_sparc_64.c 	if (unlikely(len > task_size))
task_size         187 arch/sparc/kernel/sys_sparc_64.c 		if (task_size - len >= addr &&
task_size         298 arch/sparc/kernel/sys_sparc_64.c 		unsigned long task_size = STACK_TOP32;
task_size         302 arch/sparc/kernel/sys_sparc_64.c 		if (gap > (task_size / 6 * 5))
task_size         303 arch/sparc/kernel/sys_sparc_64.c 			gap = (task_size / 6 * 5);
task_size         305 arch/sparc/kernel/sys_sparc_64.c 		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
task_size          34 arch/sparc/mm/hugetlbpage.c 	unsigned long task_size = TASK_SIZE;
task_size          38 arch/sparc/mm/hugetlbpage.c 		task_size = STACK_TOP32;
task_size          43 arch/sparc/mm/hugetlbpage.c 	info.high_limit = min(task_size, VA_EXCLUDE_START);
task_size          48 arch/sparc/mm/hugetlbpage.c 	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
task_size          51 arch/sparc/mm/hugetlbpage.c 		info.high_limit = task_size;
task_size         104 arch/sparc/mm/hugetlbpage.c 	unsigned long task_size = TASK_SIZE;
task_size         107 arch/sparc/mm/hugetlbpage.c 		task_size = STACK_TOP32;
task_size         111 arch/sparc/mm/hugetlbpage.c 	if (len > task_size)
task_size         123 arch/sparc/mm/hugetlbpage.c 		if (task_size - len >= addr &&
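
The sparc entries (both sys_sparc_64.c and hugetlbpage.c) combine two ideas: a task that trips the condition not shown (presumably 32-bit compat) is bounded by STACK_TOP32 instead of the full TASK_SIZE, and a caller's hint address is only honoured when task_size - len >= addr, the overflow-safe form of addr + len <= task_size. A compact sketch of that hint test under invented limits:

    /* Sketch of the sparc-style hint validation; both limits are invented values. */
    #include <stdio.h>
    #include <stdbool.h>

    #define FAKE_TASK_SIZE   (1UL << 44)    /* stand-in for the 64-bit limit */
    #define FAKE_STACK_TOP32 (1UL << 31)    /* stand-in for the compat limit */

    static bool hint_usable(bool compat, unsigned long addr, unsigned long len)
    {
        unsigned long task_size = compat ? FAKE_STACK_TOP32 : FAKE_TASK_SIZE;

        if (len > task_size)
            return false;                   /* cannot fit anywhere */
        /* overflow-safe form of addr + len <= task_size */
        return task_size - len >= addr;
    }

    int main(void)
    {
        printf("%d\n", hint_usable(false, 1UL << 40, 1UL << 20));   /* 1: fits */
        printf("%d\n", hint_usable(true,  1UL << 40, 1UL << 20));   /* 0: above the compat limit */
        return 0;
    }
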
task_size          69 arch/um/include/asm/processor-generic.h extern unsigned long task_size;
task_size          71 arch/um/include/asm/processor-generic.h #define TASK_SIZE (task_size)
task_size         239 arch/um/kernel/um_arch.c unsigned long task_size;
task_size         240 arch/um/kernel/um_arch.c EXPORT_SYMBOL(task_size);
task_size         274 arch/um/kernel/um_arch.c 	task_size = host_task_size & PGDIR_MASK;
task_size         904 arch/x86/include/asm/processor.h #define __TASK_UNMAPPED_BASE(task_size)	(PAGE_ALIGN(task_size / 3))
task_size         160 arch/x86/kernel/fpu/init.c 	int task_size = sizeof(struct task_struct);
task_size         166 arch/x86/kernel/fpu/init.c 	task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
task_size         172 arch/x86/kernel/fpu/init.c 	task_size += fpu_kernel_xstate_size;
task_size         185 arch/x86/kernel/fpu/init.c 	arch_task_struct_size = task_size;
task_size          39 arch/x86/mm/mmap.c static unsigned long stack_maxrandom_size(unsigned long task_size)
task_size          43 arch/x86/mm/mmap.c 		max = (-1UL) & __STACK_RND_MASK(task_size == task_size_32bit());
task_size          80 arch/x86/mm/mmap.c static unsigned long mmap_base(unsigned long rnd, unsigned long task_size,
task_size          84 arch/x86/mm/mmap.c 	unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
task_size          96 arch/x86/mm/mmap.c 	gap_max = (task_size / 6) * 5;
task_size         103 arch/x86/mm/mmap.c 	return PAGE_ALIGN(task_size - gap - rnd);
task_size         107 arch/x86/mm/mmap.c 				      unsigned long task_size)
task_size         109 arch/x86/mm/mmap.c 	return __TASK_UNMAPPED_BASE(task_size) + rnd;
task_size         117 arch/x86/mm/mmap.c 		unsigned long random_factor, unsigned long task_size,
task_size         120 arch/x86/mm/mmap.c 	*legacy_base = mmap_legacy_base(random_factor, task_size);
task_size         124 arch/x86/mm/mmap.c 		*base = mmap_base(random_factor, task_size, rlim_stack);
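
The x86 mm/mmap.c entries compute the top-down mmap base: reserve a gap below the top of the address space derived from the stack rlimit, clamp it so it never exceeds 5/6 of task_size, and place the base at PAGE_ALIGN(task_size - gap - rnd). The sketch below reproduces only that clamping arithmetic; the constants and the minimum-gap lower clamp are assumptions, and the randomization padding from stack_maxrandom_size() is intentionally left out:

    /* Sketch of the gap clamping behind the x86 mmap_base() lines; page size,
     * task size and minimum gap are placeholder values. */
    #include <stdio.h>

    #define FAKE_PAGE_SIZE      4096UL
    #define FAKE_PAGE_ALIGN(x)  (((x) + FAKE_PAGE_SIZE - 1) & ~(FAKE_PAGE_SIZE - 1))
    #define FAKE_TASK_SIZE      (1UL << 47)
    #define FAKE_GAP_MIN        (128UL << 20)   /* assumed minimum hole below the stack */

    static unsigned long sketch_mmap_base(unsigned long stack_rlimit, unsigned long rnd)
    {
        unsigned long gap = stack_rlimit;
        unsigned long gap_max = (FAKE_TASK_SIZE / 6) * 5;   /* never more than 5/6 of the space */

        if (gap < FAKE_GAP_MIN)
            gap = FAKE_GAP_MIN;
        else if (gap > gap_max)
            gap = gap_max;

        return FAKE_PAGE_ALIGN(FAKE_TASK_SIZE - gap - rnd);
    }

    int main(void)
    {
        /* an 8 MiB stack rlimit gets bumped up to the assumed minimum gap */
        printf("base = %#lx\n", sketch_mmap_base(8UL << 20, 0x12345000UL));
        return 0;
    }
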
task_size          16 arch/x86/um/vdso/vma.c extern unsigned long task_size;
task_size          27 arch/x86/um/vdso/vma.c 	um_vdso_addr = task_size - PAGE_SIZE;
task_size         474 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	u32 ctrl_size, task_size, bufdesc_size;
task_size         477 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
task_size         482 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 			  HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN)        |
task_size         574 drivers/net/ethernet/qlogic/qed/qed_cxt.c 	u32 curr_line, total, i, task_size, line;
task_size         719 drivers/net/ethernet/qlogic/qed/qed_cxt.c 			task_size = p_mngr->task_type_size[p_seg->type];
task_size         721 drivers/net/ethernet/qlogic/qed/qed_cxt.c 					     curr_line, total, task_size);
task_size         277 drivers/scsi/qedi/qedi_fw_api.c 					u32 task_size,
task_size         316 drivers/scsi/qedi/qedi_fw_api.c 		val = cpu_to_le32(task_size);
task_size         492 drivers/scsi/qedi/qedi_fw_api.c 	u32 task_size, val;
task_size         495 drivers/scsi/qedi/qedi_fw_api.c 	task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
task_size         507 drivers/scsi/qedi/qedi_fw_api.c 		val = cpu_to_le32(task_size +
task_size         513 drivers/scsi/qedi/qedi_fw_api.c 		val = cpu_to_le32(task_size);
task_size         556 drivers/scsi/qedi/qedi_fw_api.c 		cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
task_size         559 drivers/scsi/qedi/qedi_fw_api.c 	if (exp_data_transfer_len > task_size  ||
task_size         561 drivers/scsi/qedi/qedi_fw_api.c 		exp_data_transfer_len = task_size;
task_size         565 drivers/scsi/qedi/qedi_fw_api.c 				  task_size, exp_data_transfer_len, num_sges,
task_size         570 drivers/scsi/qedi/qedi_fw_api.c 					   task_type, task_size,
task_size        1384 fs/exec.c      	current->mm->task_size = TASK_SIZE;
task_size        1571 fs/proc/task_mmu.c 	end_vaddr = mm->task_size;
task_size        1574 fs/proc/task_mmu.c 	if (svpfn > mm->task_size >> PAGE_SHIFT)
task_size        1277 fs/userfaultfd.c 	__u64 task_size = mm->task_size;
task_size        1289 fs/userfaultfd.c 	if (*start >= task_size)
task_size        1291 fs/userfaultfd.c 	if (len > task_size - *start)
task_size         387 include/linux/mm_types.h 		unsigned long task_size;	/* size of task vm space */
task_size        6112 kernel/events/core.c 	u64 task_size;
task_size        6128 kernel/events/core.c 	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
task_size        6129 kernel/events/core.c 	stack_size = min(stack_size, (u16) task_size);
task_size         166 mm/debug.c     		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
task_size        1040 mm/mempolicy.c 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
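
The remaining entries are the generic consumers of mm->task_size, the per-mm field declared in include/linux/mm_types.h and set to TASK_SIZE in fs/exec.c; the fs/userfaultfd.c lines show a typical bounds test against it. A minimal sketch of that test (fake_mm stands in for struct mm_struct):

    /* Sketch of the userfaultfd-style range validation against mm->task_size;
     * fake_mm is a stand-in, not the kernel's struct mm_struct. */
    #include <stdio.h>

    struct fake_mm { unsigned long task_size; };

    static int validate_range(const struct fake_mm *mm,
                              unsigned long start, unsigned long len)
    {
        if (start >= mm->task_size)
            return -1;                      /* starts beyond the user address space */
        if (len > mm->task_size - start)
            return -1;                      /* would spill past the end */
        return 0;
    }

    int main(void)
    {
        struct fake_mm mm = { .task_size = 1UL << 47 };

        printf("%d\n", validate_range(&mm, 0x10000, 0x1000));           /* 0: ok */
        printf("%d\n", validate_range(&mm, (1UL << 47) - 4096, 8192));  /* -1: spills */
        return 0;
    }
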