from_mm 21 arch/um/include/asm/mmu.h extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm); from_mm 61 arch/um/include/asm/processor-generic.h static inline void mm_copy_segments(struct mm_struct *from_mm, from_mm 53 arch/um/kernel/skas/mmu.c struct mm_context *from_mm = NULL; from_mm 64 arch/um/kernel/skas/mmu.c from_mm = &current->mm->context; from_mm 67 arch/um/kernel/skas/mmu.c if (from_mm) from_mm 69 arch/um/kernel/skas/mmu.c from_mm->id.u.pid); from_mm 78 arch/um/kernel/skas/mmu.c ret = init_new_ldt(to_mm, from_mm); from_mm 298 arch/x86/um/ldt.c long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm) from_mm 309 arch/x86/um/ldt.c if (!from_mm) { from_mm 335 arch/x86/um/ldt.c mutex_lock(&from_mm->arch.ldt.lock); from_mm 336 arch/x86/um/ldt.c if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES) from_mm 337 arch/x86/um/ldt.c memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries, from_mm 340 arch/x86/um/ldt.c i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; from_mm 350 arch/x86/um/ldt.c from_mm->arch.ldt.u.pages[i], PAGE_SIZE); from_mm 353 arch/x86/um/ldt.c new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count; from_mm 354 arch/x86/um/ldt.c mutex_unlock(&from_mm->arch.ldt.lock);