/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_SWITCH_TO_H
#define _ASM_TILE_SWITCH_TO_H

#include <arch/sim_def.h>

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task (in which case it does nothing).
 * The number of callee-saved registers saved on the kernel stack
 * is defined here for use in copy_thread() and must agree with __switch_to().
 */
#define CALLEE_SAVED_FIRST_REG 30
#define CALLEE_SAVED_REGS_COUNT 24   /* r30 to r52, plus an empty to align */
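
/*
 * Illustrative sketch, not a definition from this header: copy_thread()
 * is expected to carve a save area of CALLEE_SAVED_REGS_COUNT longs out
 * of the new task's kernel stack so that __switch_to() can later restore
 * r30..r52 (plus the alignment slot) from it.  Only the arithmetic below
 * is implied by the defines above; "ksp" and "callee_regs" are
 * hypothetical local names.
 *
 *	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
 *	callee_regs = (unsigned long *)ksp;
 */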

#ifndef __ASSEMBLY__

struct task_struct;

/*
 * Pause the DMA engine and static network before task switching.
 */
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);
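
/*
 * For context, a hedged sketch of the call site: the generic scheduler
 * invokes this hook from prepare_task_switch() shortly before the
 * register state is actually switched, roughly as follows (none of the
 * surrounding code is defined in this header):
 *
 *	prepare_arch_switch(next);
 *	...
 *	switch_to(prev, next, prev);
 */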

#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
				      struct task_struct *next);
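
/*
 * A minimal usage sketch, assuming the generic context_switch() in
 * kernel/sched/core.c: "prev" is also passed as the "last" argument so
 * that the stack which eventually resumes on this side learns which
 * task it actually switched away from.
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	finish_task_switch(prev);
 */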

/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       unsigned long new_system_save_k_0);
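
/*
 * Hedged sketch of how the two entry points are expected to relate:
 * _switch_to() computes the next task's SPR_SYSTEM_SAVE_K_0 value
 * (which encodes its kernel stack) and hands off to the low-level
 * helper, along the lines of:
 *
 *	return __switch_to(prev, next, next_current_ksp0(next));
 *
 * next_current_ksp0() is named here only for illustration and is not
 * declared in this header.
 */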

/* Address at which tasks that have been switched away from are waiting. */
extern unsigned long get_switch_to_pc(void);
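
/*
 * For example (a hedged illustration), a backtracer can seed the unwind
 * of a task that is not currently running with:
 *
 *	pc = get_switch_to_pc();
 *	sp = p->thread.ksp;
 *
 * "p->thread.ksp" is assumed from the usual thread-struct layout and is
 * not declared in this header.
 */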

/*
 * Kernel threads can check whether they need to migrate their
 * stack whenever they return from a context switch; for user
 * threads, we defer until they are returning to user space.
 * Homecache migration itself is deferred until the runqueue
 * lock has been released, which is why it is done here.
 */
#define finish_arch_post_lock_switch() do {                               \
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
	if (current->mm == NULL && !kstack_hash &&                        \
	    current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
		homecache_migrate_kthread();                              \
} while (0)
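
/*
 * For reference, a hedged sketch of where this hook runs: the core
 * scheduler calls it from finish_task_switch() only after
 * finish_lock_switch() has dropped the runqueue lock, roughly:
 *
 *	finish_lock_switch(rq, prev);
 *	finish_arch_post_lock_switch();
 *
 * which matches the "after the runqueue lock is released" requirement
 * described above.
 */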

/* Support function for forking a new task. */
void ret_from_fork(void);

/* Support function for forking a new kernel thread. */
void ret_from_kernel_thread(void *fn, void *arg);
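
/*
 * A hedged sketch of how copy_thread() is expected to use this for a
 * kernel thread: the thread function and its argument are stashed where
 * __switch_to() will restore them (the callee-saved area sized above),
 * and the new task's saved pc is pointed at ret_from_kernel_thread so
 * that the first switch into the task ends up running fn(arg).  The
 * field name below is illustrative, not declared in this header:
 *
 *	p->thread.pc = (unsigned long)ret_from_kernel_thread;
 */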

/* Called from ret_from_xxx() when a new process starts up. */
struct task_struct *sim_notify_fork(struct task_struct *prev);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SWITCH_TO_H */