kernel_start      461 arch/arm64/mm/mmu.c 	phys_addr_t kernel_start = __pa_symbol(_text);
kernel_start      475 arch/arm64/mm/mmu.c 	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
kernel_start      505 arch/arm64/mm/mmu.c 	__map_memblock(pgdp, kernel_start, kernel_end,
kernel_start      507 arch/arm64/mm/mmu.c 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
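
The four arm64 hits above come from the map_mem() path: the kernel image is marked NOMAP so the generic memblock mapping loop skips it, is mapped explicitly afterwards, and is then made visible again. Below is a minimal user-space sketch of just that ordering; the memblock_*/map_kernel_region stand-ins only print, and the addresses are invented.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Printing stand-ins for the real memblock and page-table calls. */
static void memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	printf("mark NOMAP  [%#llx, %#llx)\n",
	       (unsigned long long)base, (unsigned long long)(base + size));
}

static void memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	printf("clear NOMAP [%#llx, %#llx)\n",
	       (unsigned long long)base, (unsigned long long)(base + size));
}

/* Stands in for __map_memblock() from the hits above. */
static void map_kernel_region(phys_addr_t start, phys_addr_t end)
{
	printf("map kernel  [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	phys_addr_t kernel_start = 0x40080000;	/* pretend __pa_symbol(_text) */
	phys_addr_t kernel_end   = 0x41000000;	/* pretend end of kernel image */

	/* 1. Hide the kernel image from the generic memblock mapping loop. */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/* 2. ... generic loop maps every other memblock region here ... */
	printf("map remaining memory (kernel image skipped: NOMAP)\n");

	/* 3. Map the kernel image explicitly, then make it visible again. */
	map_kernel_region(kernel_start, kernel_end);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
	return 0;
}
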
kernel_start      975 arch/mips/cavium-octeon/setup.c 	uint64_t kernel_start;
kernel_start     1086 arch/mips/cavium-octeon/setup.c 	kernel_start = (unsigned long) _text;
kernel_start     1090 arch/mips/cavium-octeon/setup.c 	kernel_start &= ~0xffffffff80000000ULL;
kernel_start     1091 arch/mips/cavium-octeon/setup.c 	add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM);
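
The Octeon hits compute the kernel's physical load address by masking the sign-extended segment bits off the _text link address before handing the range to add_memory_region(). A small model of that arithmetic with a made-up _text value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t _text = 0xffffffff81100000ULL;	/* hypothetical link address */
	uint64_t kernel_start = _text;

	/* Drop the sign-extended segment bits to get a physical address. */
	kernel_start &= ~0xffffffff80000000ULL;

	printf("virtual  %#llx\n", (unsigned long long)_text);
	printf("physical %#llx\n", (unsigned long long)kernel_start);
	/* add_memory_region(kernel_start, kernel_size, BOOT_MEM_RAM) would
	 * then report this range back to the MIPS boot memory map. */
	return 0;
}
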
kernel_start       18 arch/mips/include/asm/lasat/head.h 	u32 kernel_start;
kernel_start      366 arch/parisc/mm/init.c 	unsigned long kernel_start, kernel_end;
kernel_start      370 arch/parisc/mm/init.c 	kernel_start = __pa((unsigned long)&__init_begin);
kernel_start      438 arch/parisc/mm/init.c 				} else if (address < kernel_start || address >= kernel_end) {
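
The parisc hits bound a physical range with kernel_start/kernel_end and branch on whether an address falls outside it. A minimal model of the half-open range test from the line-438 hit; the bounds are invented and only loosely modeled on __pa(__init_begin) and a matching end symbol.

#include <stdio.h>
#include <stdbool.h>

static bool outside_kernel_range(unsigned long address,
				 unsigned long kernel_start,
				 unsigned long kernel_end)
{
	/* Same shape as the line-438 check above. */
	return address < kernel_start || address >= kernel_end;
}

int main(void)
{
	unsigned long kernel_start = 0x00c00000;	/* hypothetical __pa bound */
	unsigned long kernel_end   = 0x01400000;	/* hypothetical __pa bound */

	printf("%d\n", outside_kernel_range(0x00400000, kernel_start, kernel_end)); /* 1 */
	printf("%d\n", outside_kernel_range(0x00d00000, kernel_start, kernel_end)); /* 0 */
	return 0;
}
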
kernel_start      284 arch/x86/mm/mem_encrypt_identity.c 	unsigned long kernel_start, kernel_end, kernel_len;
kernel_start      309 arch/x86/mm/mem_encrypt_identity.c 	kernel_start = __pa_symbol(_text);
kernel_start      311 arch/x86/mm/mem_encrypt_identity.c 	kernel_len = kernel_end - kernel_start;
kernel_start      353 arch/x86/mm/mem_encrypt_identity.c 	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
kernel_start      418 arch/x86/mm/mem_encrypt_identity.c 	ppd.paddr = kernel_start;
kernel_start      419 arch/x86/mm/mem_encrypt_identity.c 	ppd.vaddr = kernel_start;
kernel_start      424 arch/x86/mm/mem_encrypt_identity.c 	ppd.paddr = kernel_start;
kernel_start      425 arch/x86/mm/mem_encrypt_identity.c 	ppd.vaddr = kernel_start + decrypted_base;
kernel_start      456 arch/x86/mm/mem_encrypt_identity.c 	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
kernel_start      469 arch/x86/mm/mem_encrypt_identity.c 	ppd.vaddr = kernel_start + decrypted_base;
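
The mem_encrypt_identity.c hits set up two virtual aliases of the same physical kernel range: one at kernel_start mapped encrypted, and one at kernel_start + decrypted_base mapped decrypted, which sme_encrypt_execute() then uses to encrypt the image in place (the doubled sme_pgtable_calc() in the line-353 hit presumably covers page tables for both aliases). The sketch below only models the address bookkeeping; ppd_t, the offsets, and the addresses are simplified stand-ins for the kernel's real structures.

#include <stdio.h>
#include <stdint.h>

typedef struct {
	uint64_t paddr;		/* physical page being mapped */
	uint64_t vaddr;		/* virtual alias it is mapped at */
	int encrypted;		/* whether the mapping carries the C-bit */
} ppd_t;

int main(void)
{
	uint64_t kernel_start   = 0x01000000;	/* pretend __pa_symbol(_text) */
	uint64_t kernel_end     = 0x03000000;	/* pretend __pa_symbol(_end)  */
	uint64_t kernel_len     = kernel_end - kernel_start;
	uint64_t decrypted_base = 0x40000000;	/* pretend alias offset */

	ppd_t enc = { kernel_start, kernel_start,                  1 };
	ppd_t dec = { kernel_start, kernel_start + decrypted_base, 0 };

	printf("encrypted alias: virt %#llx -> phys %#llx (%llu bytes)\n",
	       (unsigned long long)enc.vaddr, (unsigned long long)enc.paddr,
	       (unsigned long long)kernel_len);
	printf("decrypted alias: virt %#llx -> phys %#llx\n",
	       (unsigned long long)dec.vaddr, (unsigned long long)dec.paddr);
	/* sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, ...)
	 * would then read the still-plaintext bytes through the decrypted
	 * alias and write them back through the encrypted one. */
	return 0;
}
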
kernel_start       65 tools/perf/util/cs-etm.c 	u64 kernel_start;
kernel_start      623 tools/perf/util/cs-etm.c 	if (address >= etmq->etm->kernel_start) {
kernel_start      833 tools/perf/util/cs-etm.c 	if (!etm->kernel_start)
kernel_start      834 tools/perf/util/cs-etm.c 		etm->kernel_start = machine__kernel_start(etm->machine);
kernel_start      215 tools/perf/util/db-export.c 	u64 kernel_start = machine__kernel_start(machine);
kernel_start      265 tools/perf/util/db-export.c 					     kernel_start);
kernel_start       73 tools/perf/util/intel-pt.c 	u64 kernel_start;
kernel_start      509 tools/perf/util/intel-pt.c 	return ip >= pt->kernel_start ?
kernel_start      690 tools/perf/util/intel-pt.c 	if (ip >= ptq->pt->kernel_start)
kernel_start     1326 tools/perf/util/intel-pt.c 				     sample->ip, pt->kernel_start);
kernel_start     1731 tools/perf/util/intel-pt.c 	cpumode = sample.ip < ptq->pt->kernel_start ?
kernel_start     1754 tools/perf/util/intel-pt.c 				     pt->kernel_start);
kernel_start     2163 tools/perf/util/intel-pt.c 	if (!pt->kernel_start) {
kernel_start     2164 tools/perf/util/intel-pt.c 		pt->kernel_start = machine__kernel_start(pt->machine);
kernel_start     2190 tools/perf/util/intel-pt.c 			    state->from_ip >= pt->kernel_start) {
kernel_start     2208 tools/perf/util/intel-pt.c 		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
kernel_start     2209 tools/perf/util/intel-pt.c 		    state->to_ip && state->to_ip < pt->kernel_start) {
kernel_start       98 tools/perf/util/machine.c 	machine->kernel_start = 0;
kernel_start     2662 tools/perf/util/machine.c 	machine->kernel_start = 1ULL << 63;
kernel_start     2671 tools/perf/util/machine.c 			machine->kernel_start = map->start;
kernel_start       55 tools/perf/util/machine.h 	u64		  kernel_start;
kernel_start       92 tools/perf/util/machine.h 	if (!machine->kernel_start)
kernel_start       94 tools/perf/util/machine.h 	return machine->kernel_start;
kernel_start       99 tools/perf/util/machine.h 	u64 kernel_start = machine__kernel_start(machine);
kernel_start      101 tools/perf/util/machine.h 	return ip >= kernel_start;
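
The machine.c/machine.h hits cache kernel_start lazily, default it to the top half of the address space (1ULL << 63) when no kernel text map is available, and reduce machine__kernel_ip() to a one-sided compare. A self-contained model of that pattern; find_kernel_map_start() is a hypothetical stand-in for perf's real kernel-map lookup.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

struct machine {
	u64 kernel_start;	/* 0 until first looked up */
};

/* Stand-in for finding the kernel text map; 0 means "not found". */
static u64 find_kernel_map_start(struct machine *machine)
{
	(void)machine;
	return 0xffffffff81000000ULL;	/* hypothetical x86-64 kernel base */
}

static u64 machine__kernel_start(struct machine *machine)
{
	if (!machine->kernel_start) {
		u64 start = find_kernel_map_start(machine);

		/* Fall back to "anything in the top half is kernel". */
		machine->kernel_start = start ? start : 1ULL << 63;
	}
	return machine->kernel_start;
}

static bool machine__kernel_ip(struct machine *machine, u64 ip)
{
	return ip >= machine__kernel_start(machine);
}

int main(void)
{
	struct machine m = { .kernel_start = 0 };

	printf("%d\n", machine__kernel_ip(&m, 0x00007f0000001000ULL)); /* 0: user   */
	printf("%d\n", machine__kernel_ip(&m, 0xffffffff81234567ULL)); /* 1: kernel */
	return 0;
}

The 1ULL << 63 fallback is enough here because perf only needs the user/kernel split point to classify sample addresses, not the kernel's exact base.
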
kernel_start       92 tools/perf/util/thread-stack.c 	u64 kernel_start;
kernel_start      141 tools/perf/util/thread-stack.c 		ts->kernel_start = machine__kernel_start(machine);
kernel_start      145 tools/perf/util/thread-stack.c 		ts->kernel_start = 1ULL << 63;
kernel_start      456 tools/perf/util/thread-stack.c static inline u64 callchain_context(u64 ip, u64 kernel_start)
kernel_start      458 tools/perf/util/thread-stack.c 	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
kernel_start      463 tools/perf/util/thread-stack.c 			  size_t sz, u64 ip, u64 kernel_start)
kernel_start      466 tools/perf/util/thread-stack.c 	u64 context = callchain_context(ip, kernel_start);
kernel_start      487 tools/perf/util/thread-stack.c 		context = callchain_context(ip, kernel_start);
kernel_start      627 tools/perf/util/thread-stack.c 				ts->kernel_start);
kernel_start      661 tools/perf/util/thread-stack.c 	u64 ks = ts->kernel_start;
kernel_start      778 tools/perf/util/thread-stack.c 				ts->kernel_start);
kernel_start      840 tools/perf/util/thread-stack.c 				sample->addr, ts->kernel_start);
kernel_start      912 tools/perf/util/thread-stack.c 					ts->kernel_start);
kernel_start      971 tools/perf/util/thread-stack.c 					ts->kernel_start);
kernel_start       87 tools/perf/util/thread-stack.h 			  size_t sz, u64 ip, u64 kernel_start);
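
The thread-stack.c hits use kernel_start to pick which PERF_CONTEXT_* marker precedes a run of synthesized callchain entries. A compact model of callchain_context() plus the idea of emitting a marker only when the user/kernel boundary is crossed; the PERF_CONTEXT_* values mirror perf's uapi constants, and the loop and sample addresses are invented.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define PERF_CONTEXT_KERNEL	((u64)-128)
#define PERF_CONTEXT_USER	((u64)-512)

static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}

int main(void)
{
	u64 kernel_start = 1ULL << 63;	/* default split, as in the hits above */
	u64 ips[] = { 0xffffffff81010000ULL, 0xffffffff81020000ULL,
		      0x00007f0000400000ULL };
	u64 last_context = 0;

	for (size_t i = 0; i < sizeof(ips) / sizeof(ips[0]); i++) {
		u64 context = callchain_context(ips[i], kernel_start);

		/* Emit a context marker only when crossing the boundary. */
		if (context != last_context) {
			printf("context %#llx\n", (unsigned long long)context);
			last_context = context;
		}
		printf("  ip %#llx\n", (unsigned long long)ips[i]);
	}
	return 0;
}
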