#ifndef __ASMARM_TLS_H
#define __ASMARM_TLS_H

#include <linux/compiler.h>
#include <asm/thread_info.h>

#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
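@
@ The switch_tls_* macros below are used at context switch to install the
@ next thread's TLS state.  CP15 c13 holds the thread ID registers:
@ TPIDRURO (c13, c0, 3) carries the TLS pointer and is read-only from user
@ mode, while TPIDRURW (c13, c0, 2) may be read and written freely by user
@ mode.
@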
	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
	.endm

	.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
	mrc	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register
	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
	mcr	p15, 0, \tpuser, c13, c0, 2	@ and the user r/w register
	str	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
	.endm

	.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
	ldr	\tmp1, =elf_hwcap
	ldr	\tmp1, [\tmp1, #0]
	mov	\tmp2, #0xffff0fff
	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
	mrcne	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register
	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
	mcrne	p15, 0, \tpuser, c13, c0, 2	@ set user r/w register
	strne	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
	.endm

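@
@ With no hardware TLS register, the TLS value lives in the vector page
@ at 0xffff0ff0 and user space fetches it through the __kuser_get_tls
@ helper at 0xffff0fe0 (see entry-armv.S).
@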
	.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
	mov	\tmp1, #0xffff0fff
	str	\tp, [\tmp1, #-15]		@ set TLS value at 0xffff0ff0
	.endm
#endif

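/*
 * tls_emu:     TLS register accesses are trapped and emulated by the
 *              kernel (CONFIG_TLS_REG_EMUL), so nothing needs switching.
 * has_tls_reg: a hardware TLS register (TPIDRURO) is present; on plain V6
 *              this is only known at run time via elf_hwcap.
 * switch_tls:  the assembly macro the context-switch code uses to install
 *              the next thread's TLS values.
 */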
#ifdef CONFIG_TLS_REG_EMUL
#define tls_emu		1
#define has_tls_reg		1
#define switch_tls	switch_tls_none
#elif defined(CONFIG_CPU_V6)
#define tls_emu		0
#define has_tls_reg		(elf_hwcap & HWCAP_TLS)
#define switch_tls	switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu		0
#define has_tls_reg		1
#define switch_tls	switch_tls_v6k
#else
#define tls_emu		0
#define has_tls_reg		0
#define switch_tls	switch_tls_software
#endif

#ifndef __ASSEMBLY__

static inline void set_tls(unsigned long val)
{
	struct thread_info *thread;

	thread = current_thread_info();

	thread->tp_value[0] = val;

	/*
	 * This code runs with preemption enabled and therefore must
	 * be reentrant with respect to switch_tls.
	 *
	 * We need to ensure ordering between the shadow state and the
	 * hardware state, so that we don't corrupt the hardware state
	 * with a stale shadow state during context switch.
	 *
	 * If we're preempted here, switch_tls will load TPIDRURO from
	 * thread_info upon resuming execution and the following mcr
	 * is merely redundant.
	 */
	barrier();

	if (!tls_emu) {
		if (has_tls_reg) {
			asm("mcr p15, 0, %0, c13, c0, 3"	/* set TPIDRURO */
			    : : "r" (val));
		} else {
#ifdef CONFIG_KUSER_HELPERS
			/*
			 * User space must never try to access this
			 * directly.  Expect your app to break
			 * eventually if you do so.  The user helper
			 * at 0xffff0fe0 must be used instead.  (see
			 * entry-armv.S for details)
			 */
			*((unsigned int *)0xffff0ff0) = val;
#endif
		}
	}
}
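
/*
 * Usage sketch (illustrative, not defined in this header): callers such
 * as the ARM private set_tls syscall handler install a new value with
 * something like
 *
 *	set_tls(regs->ARM_r0);
 *
 * which updates the thread_info shadow copy and the hardware (or
 * emulated) TLS state together.
 */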

static inline unsigned long get_tpuser(void)
{
	unsigned long reg = 0;

	if (has_tls_reg && !tls_emu)
		__asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));

	return reg;
}

static inline void set_tpuser(unsigned long val)
{
	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
	 * we need not update thread_info.
	 */
	if (has_tls_reg && !tls_emu) {
		asm("mcr p15, 0, %0, c13, c0, 2"
		    : : "r" (val));
	}
}

static inline void flush_tls(void)
{
	set_tls(0);
	set_tpuser(0);
}
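
/*
 * flush_tls() clears both TLS values; it is meant for paths such as exec,
 * where a new program image must not inherit the previous one's TLS state.
 */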

#endif
#endif	/* __ASMARM_TLS_H */