/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/msa.h>

struct task_struct;

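/*
 * FP context save modes, passed to resume() as its fp_save argument to
 * indicate which, if any, floating point context must be saved for the
 * outgoing task: none, the scalar FPU registers, or the full MSA vector
 * registers (which include the scalar state).
 */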
enum {
	FP_SAVE_NONE	= 0,
	FP_SAVE_VECTOR	= -1,
	FP_SAVE_SCALAR	= 1,
};

/**
 * resume - resume execution of a task
 * @prev:	The task previously executed.
 * @next:	The task to begin executing.
 * @next_ti:	task_thread_info(next).
 * @fp_save:	Which, if any, FP context to save for prev.
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
		struct task_struct *next, struct thread_info *next_ti,
		s32 fp_save);

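/*
 * Software-maintained LL/SC emulation state: ll_bit mirrors the CPU's LLbit
 * for CPUs on which ll/sc must be emulated, and ll_task is (as far as this
 * header is concerned) the task on whose behalf the emulated ll was
 * performed. ll_bit is invalidated at context switch time by
 * __clear_software_ll_bit() below.
 */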
extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed. We also reset next's
 * thread.emulated_fp count so that the FP-usage heuristics start afresh for
 * the incoming task.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev, next)				\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev, next)				\
	do { (void) (prev); (void) (next); } while (0)
#endif

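/*
 * Invalidate the LLbit at context switch time, so that an sc executed (or
 * emulated) on behalf of the incoming task cannot succeed on the strength of
 * an ll performed by the outgoing one. Where the LLAddr register is
 * software-writable (cpu_has_rw_llb) the hardware bit is cleared directly;
 * otherwise, on CPUs without native ll/sc, the software-emulated ll_bit is
 * cleared instead.
 */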
#define __clear_software_ll_bit()					\
do {									\
	if (cpu_has_rw_llb) {						\
		write_c0_lladdr(0);					\
	} else {							\
		if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
			ll_bit = 0;					\
	}								\
} while (0)

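/*
 * switch_to() performs the MIPS side of a context switch: release any FPU
 * affinity restriction on prev, save its DSP state, save COP2 state (lazily
 * if supported), invalidate the LL/SC bit, work out which FP context (scalar
 * FPU vs. MSA vector) resume() must save, and finally call resume() to swap
 * register state and stacks. resume() returns prev in the context of next,
 * and that value is written to last.
 *
 * A minimal sketch of the expected call shape from the scheduler core
 * (illustrative only, not the scheduler's exact code):
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);	// from here we run as next
 *	finish_arch_switch(last);	// restore next's COP2/DSP/TLS state
 */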
#define switch_to(prev, next, last)					\
do {									\
	u32 __c0_stat;							\
	s32 __fpsave = FP_SAVE_NONE;					\
	__mips_mt_fpaff_switch_to(prev, next);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {		\
		if (cop2_lazy_restore)					\
			KSTK_STATUS(prev) &= ~ST0_CU2;			\
		__c0_stat = read_c0_status();				\
		write_c0_status(__c0_stat | ST0_CU2);			\
		cop2_save(prev);					\
		write_c0_status(__c0_stat & ~ST0_CU2);			\
	}								\
	__clear_software_ll_bit();					\
	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
		__fpsave = FP_SAVE_SCALAR;				\
	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
		__fpsave = FP_SAVE_VECTOR;				\
	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
} while (0)

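/*
 * finish_arch_switch() runs after resume() has returned, i.e. in the context
 * of the newly scheduled task ("current" here is the incoming task, not the
 * prev argument, which this version does not use). It restores the state
 * that switch_to() saved or tore down: eagerly-managed COP2 context, DSP
 * state, the userlocal (TLS) register and watch registers, and it disables
 * MSA, which is re-enabled lazily when the new task next uses it.
 */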
#define finish_arch_switch(prev)					\
do {									\
	u32 __c0_stat;							\
	if (cop2_present && !cop2_lazy_restore &&			\
			(KSTK_STATUS(current) & ST0_CU2)) {		\
		__c0_stat = read_c0_status();				\
		write_c0_status(__c0_stat | ST0_CU2);			\
		cop2_restore(current);					\
		write_c0_status(__c0_stat & ~ST0_CU2);			\
	}								\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
	__restore_watch();						\
	disable_msa();							\
} while (0)

#endif /* _ASM_SWITCH_TO_H */