/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_64_H
#define _ASM_X86_MUTEX_64_H

/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_lock(atomic_t *v,
					 void (*fail_fn)(atomic_t *))
{
	asm_volatile_goto(LOCK_PREFIX "   decl %0\n"
			  "   jns %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
			  : exit);
	fail_fn(v);
exit:
	return;
}
#else
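/*
 * Without asm goto, the call to the slowpath is open-coded inside the
 * asm.  %rdi is hard-coded because fail_fn() is a regular C function
 * and the x86-64 ABI passes the first argument in %rdi; all remaining
 * caller-saved registers are listed as clobbers since the call may
 * overwrite them.
 */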
#define __mutex_fastpath_lock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   decl (%%rdi)\n"		\
		     "   jns 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
#endif
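
/*
 * For illustration (not part of this header): the generic mutex code in
 * kernel/locking/mutex.c invokes this fastpath with its slowpath entry
 * point as @fail_fn, roughly:
 *
 *	void __sched mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *		mutex_set_owner(lock);
 *	}
 */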

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to 0
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_dec_return(count) < 0))
		return -1;
	else
		return 0;
}
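
/*
 * For illustration (not part of this header): callers such as
 * mutex_lock_interruptible() in kernel/locking/mutex.c try this
 * fastpath first and fall back to their slowpath on contention,
 * roughly:
 *
 *	int __sched mutex_lock_interruptible(struct mutex *lock)
 *	{
 *		int ret;
 *
 *		might_sleep();
 *		ret = __mutex_fastpath_lock_retval(&lock->count);
 *		if (likely(!ret)) {
 *			mutex_set_owner(lock);
 *			return 0;
 *		}
 *		return __mutex_lock_interruptible_slowpath(lock);
 *	}
 */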

/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_unlock(atomic_t *v,
					   void (*fail_fn)(atomic_t *))
{
	asm_volatile_goto(LOCK_PREFIX "   incl %0\n"
			  "   jg %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
			  : exit);
	fail_fn(v);
exit:
	return;
}
#else
#define __mutex_fastpath_unlock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   incl (%%rdi)\n"		\
		     "   jg 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
#endif
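
/*
 * For illustration (not part of this header): mutex_unlock() in
 * kernel/locking/mutex.c passes its slowpath as @fail_fn, so the
 * expensive wakeup path only runs when waiters may be pending,
 * roughly:
 *
 *	void __sched mutex_unlock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *	}
 */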

#define __mutex_slowpath_needs_to_unlock()	1
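/*
 * Nonzero: the incl-based unlock fastpath above leaves a contended
 * count at or below 0, so the generic unlock slowpath must set the
 * count back to 1 (unlocked) itself before waking up waiters.
 */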

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
 * if it wasn't 1 originally. [the fallback function is never used on
 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
					   int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	else
		return 0;
}
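
/*
 * For illustration (not part of this header): mutex_trylock() in
 * kernel/locking/mutex.c still passes a fallback for API compatibility,
 * even though, as noted above, this x86-64 implementation never calls
 * it.  Roughly:
 *
 *	int __sched mutex_trylock(struct mutex *lock)
 *	{
 *		int ret;
 *
 *		ret = __mutex_fastpath_trylock(&lock->count,
 *					       __mutex_trylock_slowpath);
 *		if (ret)
 *			mutex_set_owner(lock);
 *
 *		return ret;
 *	}
 */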

#endif /* _ASM_X86_MUTEX_64_H */