/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

/*
 * host_supports_tls starts at -1 so that, if needed, we can detect when it
 * is still uninitialized.
 *
 * Both variables are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

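/*
 * Forward a TLS descriptor update to the host: the descriptor is installed,
 * via PTRACE_SET_THREAD_AREA, in the host process backing this CPU's
 * userspace (userspace_pid[cpu]).  do_get_thread_area() below is the
 * symmetric read path.
 */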
int do_set_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, index = %d\n",
		       ret, info->entry_number);

	return ret;
}

int do_get_thread_area(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();

	if (ret)
		printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, index = %d\n",
		       ret, info->entry_number);

	return ret;
}

/*
 * get_free_idx: find an as-yet-unused TLS descriptor index, for use by
 * sys_set_thread_area() when the caller passes entry_number == -1.
 * XXX: Consider reserving one free slot for glibc in the first place. This
 * must be done here (and by changing the GDT_ENTRY_TLS_* macros) and nowhere
 * else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/*
	 * Check LDT_empty() or the i386 sys_get_thread_area() code to see
	 * that this indeed produces an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}

#define O_FORCE 1

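/*
 * Push the shadow tls_array out to the host.  Entries no longer present are
 * first cleared locally so that the host descriptor is emptied as well.
 * Without O_FORCE, only entries not yet flushed are written; with O_FORCE,
 * every entry is rewritten regardless of its flushed state.
 */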
static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx <= GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/*
		 * If the entry is not present and was never flushed, clear
		 * it here and push the cleared descriptor to the host below.
		 * If it was already flushed, the host copy must already be
		 * empty, so it can be skipped.
		 */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}
out:
	return ret;
}

/*
 * Check whether the incoming process needs a flush, i.e. whether any
 * descriptors have not yet been flushed to the host.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * We can't test curr->present: we may need to clear a
		 * descriptor which previously had a value.
		 */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}
	return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/*
		 * It is still correct to skip entries that are not present:
		 * if an entry was never set on the host, it remains exactly
		 * as flushed as it was.
		 */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have
 * a common host process, so this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support), this wouldn't be needed.
 *
 * And it will not need to be used when (and if) we add support for the host
 * SKAS patch.
 */

int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/*
	 * We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, doing so would also make us call os_set_thread_area() with
	 * userspace_pid[cpu] == 0, which fails.
	 */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

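/*
 * Update only the in-kernel shadow copy of a descriptor.  'flushed' records
 * whether the host already holds this value (1, e.g. right after a
 * successful do_set_thread_area()) or whether a later load_TLS() still has
 * to push it out (0).
 */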
static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

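/*
 * Called when a new thread is created with CLONE_SETTLS: the user_desc
 * pointer is read from the child's saved %esi (UPT_SI), which carries the
 * tls argument of clone() on i386.  The entry is recorded as not flushed,
 * so the first switch to the child pushes it to the host.
 */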
int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_SI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/*
	 * Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here without flushed TLS.\n",
		       current->pid);
	}

	return 0;
clear:
	/*
	 * When the TLS entry has not been set, the values the user reads from
	 * the tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

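/*
 * sys_set_thread_area(): write the descriptor to the host right away, then
 * record it in the shadow array as already flushed.  entry_number == -1
 * asks the kernel to pick a free slot, which is written back to userspace.
 * Illustrative userspace sketch (not part of this file):
 *
 *	struct user_desc d = { .entry_number = -1, .base_addr = base,
 *			       .limit = 0xfffff, .seg_32bit = 1,
 *			       .limit_in_pages = 1, .useable = 1 };
 *	syscall(SYS_set_thread_area, &d);
 *	// d.entry_number now holds the GDT index that was chosen
 */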
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = do_set_thread_area(&info);
	if (ret)
		return ret;
	return set_tls_entry(current, &info, idx, 1);
}

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: unlike on i386, no error handling is done on the deferred load;
 * however, the only possible errors would be caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

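/*
 * sys_get_thread_area(): answered entirely from the shadow tls_array; the
 * host is never queried (see the XXX above get_tls_entry()).
 */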
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
		struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;
out:
	return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk(KERN_CONT "i386");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk(KERN_CONT "x86_64");
			break;
		}
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
		       host_gdt_entry_tls_min,
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES - 1);
	} else {
		printk(KERN_ERR "Host TLS support NOT detected! TLS support inside UML will not work\n");
	}
	return 0;
}

__initcall(__setup_host_supports_tls);