/*
 * x86 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */

#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/dwarf2.h>

/*
 * "Half" register/operation helpers: __ASM_SEL() selects its first
 * argument on 32-bit builds and its second on 64-bit builds, so these
 * expand to the 16-bit form (dx / decw) on x86-32 and the 32-bit form
 * (edx / decl) on x86-64 -- i.e. the low half of the natural word size.
 * (__ASM_SEL lives in <asm/asm.h>; semantics assumed from its use here.)
 */
#define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)

#ifdef CONFIG_X86_32

/*
 * The semaphore operations have a special calling sequence that
 * allow us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %eax contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax which is either a return
 * value or just clobbered..
 */

#define save_common_regs \
	pushl_cfi_reg ecx

#define restore_common_regs \
	popl_cfi_reg ecx

	/*
	 * Avoid uglifying the argument copying x86-64 needs to do.
	 * On 32-bit the semaphore pointer is already in %eax, so make
	 * the shared "movq %rax,%rdi" lines below expand to nothing.
	 */
	.macro movq src, dst
	.endm

#else

/*
 * x86-64 rwsem wrappers
 *
 * This interfaces the inline asm code to the slow-path
 * C routines. We need to save the call-clobbered regs
 * that the asm does not mark as clobbered, and move the
 * argument from %rax to %rdi.
 *
 * NOTE! We don't need to save %rax, because the functions
 * will always return the semaphore pointer in %rax (which
 * is also the input argument to these helpers)
 *
 * The following can clobber %rdx because the asm clobbers it:
 *	call_rwsem_down_write_failed
 *	call_rwsem_wake
 * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
 */

#define save_common_regs \
	pushq_cfi_reg rdi; \
	pushq_cfi_reg rsi; \
	pushq_cfi_reg rcx; \
	pushq_cfi_reg r8; \
	pushq_cfi_reg r9; \
	pushq_cfi_reg r10; \
	pushq_cfi_reg r11

#define restore_common_regs \
	popq_cfi_reg r11; \
	popq_cfi_reg r10; \
	popq_cfi_reg r9; \
	popq_cfi_reg r8; \
	popq_cfi_reg rcx; \
	popq_cfi_reg rsi; \
	popq_cfi_reg rdi

#endif

/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
	CFI_STARTPROC
	save_common_regs
	/*
	 * Unlike the write/wake paths, the inline asm does not mark
	 * %[e/r]dx as clobbered here (see the register-saving comment
	 * above), so it must also survive the C call.
	 */
	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
	movq %rax,%rdi
	call rwsem_down_read_failed
	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
	restore_common_regs
	ret
	CFI_ENDPROC
ENDPROC(call_rwsem_down_read_failed)

ENTRY(call_rwsem_down_write_failed)
	CFI_STARTPROC
	/* %[e/r]dx may be clobbered here (asm marks it clobbered). */
	save_common_regs
	movq %rax,%rdi
	call rwsem_down_write_failed
	restore_common_regs
	ret
	CFI_ENDPROC
ENDPROC(call_rwsem_down_write_failed)

ENTRY(call_rwsem_wake)
	CFI_STARTPROC
	/* do nothing if still outstanding active readers */
	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
	jnz 1f
	save_common_regs
	movq %rax,%rdi
	call rwsem_wake
	restore_common_regs
1:	ret
	CFI_ENDPROC
ENDPROC(call_rwsem_wake)

ENTRY(call_rwsem_downgrade_wake)
	CFI_STARTPROC
	save_common_regs
	/* %[e/r]dx is not in the clobber list for this path; preserve it. */
	__ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
	movq %rax,%rdi
	call rwsem_downgrade_wake
	__ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
	restore_common_regs
	ret
	CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake)