/*
 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
 */
#ifndef _ASM_POWERPC_PPC_ASM_H
#define _ASM_POWERPC_PPC_ASM_H

#include <linux/stringify.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>

#ifndef __ASSEMBLY__
#error __FILE__ should only be used in assembler files
#else

#define SZL			(BITS_PER_LONG/8)

/*
 * Stuff for accurate CPU time accounting.
 * These macros handle transitions between user and system state
 * in exception entry and exit and accumulate time to the
 * user_time and system_time fields in the paca.
 */

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
#define ACCOUNT_CPU_USER_EXIT(ra, rb)
#define ACCOUNT_STOLEN_TIME
#else
#define ACCOUNT_CPU_USER_ENTRY(ra, rb)					\
	MFTB(ra);			/* get timebase */		\
	ld	rb,PACA_STARTTIME_USER(r13);				\
	std	ra,PACA_STARTTIME(r13);					\
	subf	rb,rb,ra;		/* subtract start value */	\
	ld	ra,PACA_USER_TIME(r13);					\
	add	ra,ra,rb;		/* add on to user time */	\
	std	ra,PACA_USER_TIME(r13)

#define ACCOUNT_CPU_USER_EXIT(ra, rb)					\
	MFTB(ra);			/* get timebase */		\
	ld	rb,PACA_STARTTIME(r13);					\
	std	ra,PACA_STARTTIME_USER(r13);				\
	subf	rb,rb,ra;		/* subtract start value */	\
	ld	ra,PACA_SYSTEM_TIME(r13);				\
	add	ra,ra,rb;		/* add on to system time */	\
	std	ra,PACA_SYSTEM_TIME(r13)

#ifdef CONFIG_PPC_SPLPAR
#define ACCOUNT_STOLEN_TIME						\
BEGIN_FW_FTR_SECTION;							\
	beq	33f;							\
	/* from user - see if there are any DTL entries to process */	\
	ld	r10,PACALPPACAPTR(r13);	/* get ptr to VPA */		\
	ld	r11,PACA_DTL_RIDX(r13);	/* get log read index */	\
	addi	r10,r10,LPPACA_DTLIDX;					\
	LDX_BE	r10,0,r10;		/* get log write index */	\
	cmpd	cr1,r11,r10;						\
	beq+	cr1,33f;						\
	bl	accumulate_stolen_time;					\
	ld	r12,_MSR(r1);						\
	andi.	r10,r12,MSR_PR;		/* Restore cr0 (coming from user) */ \
33:									\
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)

#else  /* CONFIG_PPC_SPLPAR */
#define ACCOUNT_STOLEN_TIME

#endif /* CONFIG_PPC_SPLPAR */

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
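
/*
 * Usage sketch (illustrative, not a line from actual entry code): in an
 * exception entry path, once two GPRs are free as scratch and cr0 still
 * holds the user/kernel test, something like
 *
 *	ACCOUNT_CPU_USER_ENTRY(r11, r12)
 *	...
 *	ACCOUNT_STOLEN_TIME
 *
 * charges the elapsed user time, and ACCOUNT_CPU_USER_EXIT(r11, r12) on
 * the return-to-user path charges the system time.  The register choice
 * is the caller's; r11/r12 here are only an assumption for the example.
 */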

/*
 * Macros for storing registers into and loading registers from
 * exception frames.
 */
#ifdef __powerpc64__
#define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
#define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
#else
#define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
#define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
#define SAVE_NVGPRS(base)	SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
				SAVE_10GPRS(22, base)
#define REST_NVGPRS(base)	REST_GPR(13, base); REST_8GPRS(14, base); \
				REST_10GPRS(22, base)
#endif

#define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
#define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
#define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
#define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
#define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
#define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
#define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
#define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
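
/*
 * Example (a sketch, not copied from entry code): with an exception
 * frame based at r1, SAVE_NVGPRS(r1) expands to stores of all the
 * non-volatile GPRs (r14-r31, plus r13 on 32-bit), e.g. on 64-bit
 *
 *	SAVE_NVGPRS(r1)	->	std 14,GPR0+8*(14)(r1); ...
 *
 * and REST_NVGPRS(r1) reloads them on the way out.
 */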

#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
#define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
#define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
#define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
#define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
#define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
#define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
#define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
#define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)
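
/*
 * Example (illustrative): SAVE_32FPRS(0, r3) stores fr0-fr31 at
 * consecutive 8*TS_FPRWIDTH-byte slots from r3, e.g. into a
 * thread_struct FP save area whose address was loaded into r3 first.
 */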

#define SAVE_VR(n,b,base)	li b,16*(n);  stvx n,base,b
#define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
#define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
#define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
#define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
#define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
#define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
#define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
#define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
#define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
#define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
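
/*
 * Example (a sketch): the VRs go through an index register because
 * stvx/lvx have no immediate-offset form.  With a 16-byte-aligned save
 * area in r6 and r4 free as scratch, SAVE_32VRS(0,r4,r6) emits li/stvx
 * pairs that lay out v0-v31 at 16-byte stride from r6.
 */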

#ifdef __BIG_ENDIAN__
#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
#else
#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
					STXVD2X(n,b,base);	\
					XXSWAPD(n,n)

#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
					XXSWAPD(n,n)
#endif
/* Save the lower 32 VSRs in the thread VSR region */
#define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
#define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
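
/*
 * Design note: on little-endian CPUs, lxvd2x/stxvd2x access the two
 * doublewords in the opposite order from the big-endian in-memory
 * layout, so the _ROT variants wrap the access in XXSWAPD to keep the
 * saved image format identical on both endians.  The second XXSWAPD in
 * the store path undoes the first, leaving the register unchanged.
 */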

/*
 * b = base register for addressing, o = base offset from register of 1st EVR
 * n = first EVR, s = scratch
 */
#define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
#define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
#define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
#define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
#define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
#define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
#define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
#define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
#define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
#define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
#define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
#define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
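
/*
 * Example (a sketch): only the 32-bit high halves of the 64-bit SPE
 * registers are spilled here, merged out through a scratch GPR (the low
 * halves are the ordinary GPRs, saved elsewhere).  Something like
 * SAVE_32EVRS(0,r4,r5,THREAD_EVR0) would store the high words of
 * evr0-evr31 to the area at THREAD_EVR0(r5), using r4 as scratch.
 */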

/* Macros to adjust thread priority for hardware multithreading */
#define HMT_VERY_LOW	or	31,31,31	# very low priority
#define HMT_LOW		or	1,1,1
#define HMT_MEDIUM_LOW  or	6,6,6		# medium low priority
#define HMT_MEDIUM	or	2,2,2
#define HMT_MEDIUM_HIGH or	5,5,5		# medium high priority
#define HMT_HIGH	or	3,3,3
#define HMT_EXTRA_HIGH	or	7,7,7		# power7 only
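
/*
 * Typical pattern (illustrative): drop thread priority while spinning,
 * then restore it once the flag being polled is seen:
 *
 * 1:	HMT_LOW			(low priority while polling)
 *	lwz	r5,0(r3)
 *	cmpwi	r5,0
 *	beq	1b
 *	HMT_MEDIUM		(back to normal priority)
 */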

#ifdef CONFIG_PPC64
#define ULONG_SIZE	8
#else
#define ULONG_SIZE	4
#endif
#define __VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)
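
/*
 * Example (illustrative): KVM code addresses a guest register slot via
 * the symbolic register token, e.g. with a vcpu struct pointer in r9:
 *
 *	PPC_LL	r4, VCPU_GPR(R4)(r9)
 *
 * where the pasted __REG_R4 supplies the GPR number (4) used in the
 * offset computation.
 */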

#ifdef __KERNEL__
#ifdef CONFIG_PPC64

#define STACKFRAMESIZE 256
#define __STK_REG(i)   (112 + ((i)-14)*8)
#define STK_REG(i)     __STK_REG(__REG_##i)

#if defined(_CALL_ELF) && _CALL_ELF == 2
#define STK_GOT		24
#define __STK_PARAM(i)	(32 + ((i)-3)*8)
#else
#define STK_GOT		40
#define __STK_PARAM(i)	(48 + ((i)-3)*8)
#endif
#define STK_PARAM(i)	__STK_PARAM(__REG_##i)
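
/*
 * Example (a sketch): an asm routine that clobbers a non-volatile GPR
 * can spill it to its own stack frame:
 *
 *	stdu	r1,-STACKFRAMESIZE(r1)
 *	std	r14,STK_REG(R14)(r1)
 *	...
 *	ld	r14,STK_REG(R14)(r1)
 *	addi	r1,r1,STACKFRAMESIZE
 */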

#if defined(_CALL_ELF) && _CALL_ELF == 2

#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name:

#define _GLOBAL_TOC(name) \
	.section ".text"; \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name: \
0:	addis r2,r12,(.TOC.-0b)@ha; \
	addi r2,r2,(.TOC.-0b)@l; \
	.localentry name,.-name

#define _KPROBE(name) \
	.section ".kprobes.text","a"; \
	.align 2 ; \
	.type name,@function; \
	.globl name; \
name:

#define DOTSYM(a)	a

#else

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define _KPROBE(name) \
	.section ".kprobes.text","a"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):

#define DOTSYM(a)	GLUE(.,a)

#endif

#else /* 32-bit */

#define _ENTRY(n)	\
	.globl n;	\
n:

#define _GLOBAL(n)	\
	.text;		\
	.stabs __stringify(n:F-1),N_FUN,0,0,n;\
	.globl n;	\
n:

#define _GLOBAL_TOC(name) _GLOBAL(name)

#define _KPROBE(n)	\
	.section ".kprobes.text","a";	\
	.globl	n;	\
n:

#endif
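
/*
 * Example (hypothetical function name): a minimal assembler entry point
 * is declared the same way on every ABI:
 *
 * _GLOBAL(my_helper)
 *	blr
 *
 * Use _GLOBAL_TOC instead when the function needs r2 valid and may be
 * reached via its global entry point on ELFv2 (it rebuilds r2 from
 * r12), and DOTSYM(name) when emitting a direct call: it yields the
 * dot-symbol text entry on ELFv1 and the plain name on ELFv2.
 */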

/*
 * LOAD_REG_IMMEDIATE(rn, expr)
 *   Loads the value of the constant expression 'expr' into register 'rn'
 *   using immediate instructions only.  Use this when it's important not
 *   to reference other data (i.e. on ppc64 when the TOC pointer is not
 *   valid) and when 'expr' is a constant or absolute address.
 *
 * LOAD_REG_ADDR(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   you don't particularly need immediate instructions only, but you need
 *   the whole address in one register (e.g. it's a structure address and
 *   you want to access various offsets within it).  On ppc32 this is
 *   identical to LOAD_REG_IMMEDIATE.
 *
 * LOAD_REG_ADDR_PIC(rn, name)
 *   Loads the address of label 'name' into register 'rn'.  Use this when
 *   the kernel doesn't run at the linked or relocated address.  Please
 *   note that this macro will clobber the lr register.
 *
 * LOAD_REG_ADDRBASE(rn, name)
 * ADDROFF(name)
 *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
 *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
 *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
 *   in size, so is suitable for use directly as an offset in load and store
 *   instructions.  Use this when loading/storing a single word or less as:
 *      LOAD_REG_ADDRBASE(rX, name)
 *      ld	rY,ADDROFF(name)(rX)
 */

/* Be careful, this will clobber the lr register. */
#define LOAD_REG_ADDR_PIC(reg, name)		\
	bl	0f;				\
0:	mflr	reg;				\
	addis	reg,reg,(name - 0b)@ha;		\
	addi	reg,reg,(name - 0b)@l;

#ifdef __powerpc64__
#ifdef HAVE_AS_ATHIGH
#define __AS_ATHIGH high
#else
#define __AS_ATHIGH h
#endif
#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis	reg,(expr)@highest;		\
	ori	reg,reg,(expr)@higher;		\
	rldicr	reg,reg,32,31;			\
	oris	reg,reg,(expr)@__AS_ATHIGH;	\
	ori	reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)			\
	ld	reg,name@got(r2)

#define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
#define ADDROFF(name)			0

/* offsets for stack frame layout */
#define LRSAVE	16

#else /* 32-bit */

#define LOAD_REG_IMMEDIATE(reg,expr)		\
	lis	reg,(expr)@ha;			\
	addi	reg,reg,(expr)@l;

#define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE(reg, name)

#define LOAD_REG_ADDRBASE(reg, name)	lis	reg,name@ha
#define ADDROFF(name)			name@l

/* offsets for stack frame layout */
#define LRSAVE	4

#endif
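
/*
 * Example (illustrative values): early boot code that cannot rely on a
 * valid TOC can still build a full 64-bit constant from immediates:
 *
 *	LOAD_REG_IMMEDIATE(r4, KERNELBASE)
 *
 * whereas ordinary code with r2 valid would use LOAD_REG_ADDR(r4, name)
 * and get a single GOT load instead of the five-instruction sequence.
 */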

/* various errata or part fixups */
#ifdef CONFIG_PPC601_SYNC_FIX
#define SYNC				\
BEGIN_FTR_SECTION			\
	sync;				\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define SYNC_601			\
BEGIN_FTR_SECTION			\
	sync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601			\
BEGIN_FTR_SECTION			\
	isync;				\
END_FTR_SECTION_IFSET(CPU_FTR_601)
#else
#define	SYNC
#define SYNC_601
#define ISYNC_601
#endif

#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest)			\
90:	mfspr dest, SPRN_TBRL;		\
BEGIN_FTR_SECTION_NESTED(96);		\
	cmpwi dest,0;			\
	beq-  90b;			\
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#elif defined(CONFIG_8xx)
#define MFTB(dest)			mftb dest
#else
#define MFTB(dest)			mfspr dest, SPRN_TBRL
#endif

#ifndef CONFIG_SMP
#define TLBSYNC
#else /* CONFIG_SMP */
/* tlbsync is not implemented on 601 */
#define TLBSYNC				\
BEGIN_FTR_SECTION			\
	tlbsync;			\
	sync;				\
END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif

#ifdef CONFIG_PPC64
#define MTOCRF(FXM, RS)			\
	BEGIN_FTR_SECTION_NESTED(848);	\
	mtcrf	(FXM), RS;		\
	FTR_SECTION_ELSE_NESTED(848);	\
	mtocrf	(FXM), RS;		\
	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)

/*
 * PPR restore macros used in entry_64.S
 * Used for P7 or later processors
 */
#define HMT_MEDIUM_LOW_HAS_PPR						\
BEGIN_FTR_SECTION_NESTED(944)						\
	HMT_MEDIUM_LOW;							\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944)

#define SET_DEFAULT_THREAD_PPR(ra, rb)					\
BEGIN_FTR_SECTION_NESTED(945)						\
	lis	ra,INIT_PPR@highest;	/* default ppr=3 */		\
	ld	rb,PACACURRENT(r13);					\
	sldi	ra,ra,32;	/* bits 11-13 are used for ppr */	\
	std	ra,TASKTHREADPPR(rb);					\
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)

#endif

/*
 * This instruction is not implemented on the PPC 603 or 601; however, on
 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
 * All of these instructions exist in the 8xx, they have magical powers,
 * and they must be used.
 */

#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
#define tlbia					\
	li	r4,1024;			\
	mtctr	r4;				\
	lis	r4,KERNELBASE@h;		\
0:	tlbie	r4;				\
	addi	r4,r4,0x1000;			\
	bdnz	0b
#endif


#ifdef CONFIG_IBM440EP_ERR42
#define PPC440EP_ERR42 isync
#else
#define PPC440EP_ERR42
#endif

/* The following stops all load and store data streams associated with stream
 * ID (i.e. streams created explicitly).  The embedded and server mnemonics
 * for dcbt are different so we use machine "power4" here explicitly.
 */
#define DCBT_STOP_ALL_STREAM_IDS(scratch)	\
.machine push ;					\
.machine "power4" ;				\
	lis	scratch,0x60000000@h;		\
	dcbt	r0,scratch,0b01010;		\
.machine pop
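
/*
 * Example (illustrative choice of scratch register): stop any prefetch
 * streams the current thread started, clobbering one GPR:
 *
 *	DCBT_STOP_ALL_STREAM_IDS(r6)
 */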

/*
 * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
 * keep the address intact to be compatible with code shared with
 * 32-bit classic.
 *
 * On the other hand, I find it useful to have them behave as expected
 * by their name (i.e. always do the addition) on 64-bit BookE.
 */
#if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
#define toreal(rd)
#define fromreal(rd)

/*
 * We use addis to ensure compatibility with the "classic" ppc versions of
 * these macros, which use rs = 0 to get the tophys offset in rd, rather than
 * converting the address in r0, and so this version has to do that too
 * (i.e. set register rd to 0 when rs == 0).
 */
#define tophys(rd,rs)				\
	addis	rd,rs,0

#define tovirt(rd,rs)				\
	addis	rd,rs,0

#elif defined(CONFIG_PPC64)
#define toreal(rd)		/* we can access c000... in real mode */
#define fromreal(rd)

#define tophys(rd,rs)				\
	clrldi	rd,rs,2

#define tovirt(rd,rs)				\
	rotldi	rd,rs,16;			\
	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
	rotldi	rd,rd,48
#else
/*
 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
 * physical base address of RAM at compile time.
 */
#define toreal(rd)	tophys(rd,rd)
#define fromreal(rd)	tovirt(rd,rd)

#define tophys(rd,rs)				\
0:	addis	rd,rs,-PAGE_OFFSET@h;		\
	.section ".vtop_fixup","aw";		\
	.align  1;				\
	.long   0b;				\
	.previous

#define tovirt(rd,rs)				\
0:	addis	rd,rs,PAGE_OFFSET@h;		\
	.section ".ptov_fixup","aw";		\
	.align  1;				\
	.long   0b;				\
	.previous
#endif
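
/*
 * Example (illustrative): early MMU-off code holding a kernel virtual
 * address in r4 that needs the physical address in place:
 *
 *	tophys(r4,r4)
 *
 * On 64-bit this is a single clrldi; on classic 32-bit it is an addis
 * recorded in the .vtop_fixup section so it can be patched at runtime.
 */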

#ifdef CONFIG_PPC_BOOK3S_64
#define RFI		rfid
#define MTMSRD(r)	mtmsrd	r
#define MTMSR_EERI(reg)	mtmsrd	reg,1
#else
#define FIX_SRR1(ra, rb)
#ifndef CONFIG_40x
#define	RFI		rfi
#else
#define RFI		rfi; b .	/* Prevent prefetch past rfi */
#endif
#define MTMSRD(r)	mtmsr	r
#define MTMSR_EERI(reg)	mtmsr	reg
#define CLR_TOP32(r)
#endif

#endif /* __KERNEL__ */

/* The boring bits... */

/* Condition Register Bit Fields */

#define	cr0	0
#define	cr1	1
#define	cr2	2
#define	cr3	3
#define	cr4	4
#define	cr5	5
#define	cr6	6
#define	cr7	7


/*
 * General Purpose Registers (GPRs)
 *
 * The lower case r0-r31 should be used in preference to the upper
 * case R0-R31 as they provide more error checking in the assembler.
 * Use R0-R31 only when really necessary.
 */

#define	r0	%r0
#define	r1	%r1
#define	r2	%r2
#define	r3	%r3
#define	r4	%r4
#define	r5	%r5
#define	r6	%r6
#define	r7	%r7
#define	r8	%r8
#define	r9	%r9
#define	r10	%r10
#define	r11	%r11
#define	r12	%r12
#define	r13	%r13
#define	r14	%r14
#define	r15	%r15
#define	r16	%r16
#define	r17	%r17
#define	r18	%r18
#define	r19	%r19
#define	r20	%r20
#define	r21	%r21
#define	r22	%r22
#define	r23	%r23
#define	r24	%r24
#define	r25	%r25
#define	r26	%r26
#define	r27	%r27
#define	r28	%r28
#define	r29	%r29
#define	r30	%r30
#define	r31	%r31


/* Floating Point Registers (FPRs) */

#define	fr0	0
#define	fr1	1
#define	fr2	2
#define	fr3	3
#define	fr4	4
#define	fr5	5
#define	fr6	6
#define	fr7	7
#define	fr8	8
#define	fr9	9
#define	fr10	10
#define	fr11	11
#define	fr12	12
#define	fr13	13
#define	fr14	14
#define	fr15	15
#define	fr16	16
#define	fr17	17
#define	fr18	18
#define	fr19	19
#define	fr20	20
#define	fr21	21
#define	fr22	22
#define	fr23	23
#define	fr24	24
#define	fr25	25
#define	fr26	26
#define	fr27	27
#define	fr28	28
#define	fr29	29
#define	fr30	30
#define	fr31	31

/* AltiVec Registers (VPRs) */

#define	v0	0
#define	v1	1
#define	v2	2
#define	v3	3
#define	v4	4
#define	v5	5
#define	v6	6
#define	v7	7
#define	v8	8
#define	v9	9
#define	v10	10
#define	v11	11
#define	v12	12
#define	v13	13
#define	v14	14
#define	v15	15
#define	v16	16
#define	v17	17
#define	v18	18
#define	v19	19
#define	v20	20
#define	v21	21
#define	v22	22
#define	v23	23
#define	v24	24
#define	v25	25
#define	v26	26
#define	v27	27
#define	v28	28
#define	v29	29
#define	v30	30
#define	v31	31

/* VSX Registers (VSRs) */

#define	vs0	0
#define	vs1	1
#define	vs2	2
#define	vs3	3
#define	vs4	4
#define	vs5	5
#define	vs6	6
#define	vs7	7
#define	vs8	8
#define	vs9	9
#define	vs10	10
#define	vs11	11
#define	vs12	12
#define	vs13	13
#define	vs14	14
#define	vs15	15
#define	vs16	16
#define	vs17	17
#define	vs18	18
#define	vs19	19
#define	vs20	20
#define	vs21	21
#define	vs22	22
#define	vs23	23
#define	vs24	24
#define	vs25	25
#define	vs26	26
#define	vs27	27
#define	vs28	28
#define	vs29	29
#define	vs30	30
#define	vs31	31
#define	vs32	32
#define	vs33	33
#define	vs34	34
#define	vs35	35
#define	vs36	36
#define	vs37	37
#define	vs38	38
#define	vs39	39
#define	vs40	40
#define	vs41	41
#define	vs42	42
#define	vs43	43
#define	vs44	44
#define	vs45	45
#define	vs46	46
#define	vs47	47
#define	vs48	48
#define	vs49	49
#define	vs50	50
#define	vs51	51
#define	vs52	52
#define	vs53	53
#define	vs54	54
#define	vs55	55
#define	vs56	56
#define	vs57	57
#define	vs58	58
#define	vs59	59
#define	vs60	60
#define	vs61	61
#define	vs62	62
#define	vs63	63

/* SPE Registers (EVPRs) */

#define	evr0	0
#define	evr1	1
#define	evr2	2
#define	evr3	3
#define	evr4	4
#define	evr5	5
#define	evr6	6
#define	evr7	7
#define	evr8	8
#define	evr9	9
#define	evr10	10
#define	evr11	11
#define	evr12	12
#define	evr13	13
#define	evr14	14
#define	evr15	15
#define	evr16	16
#define	evr17	17
#define	evr18	18
#define	evr19	19
#define	evr20	20
#define	evr21	21
#define	evr22	22
#define	evr23	23
#define	evr24	24
#define	evr25	25
#define	evr26	26
#define	evr27	27
#define	evr28	28
#define	evr29	29
#define	evr30	30
#define	evr31	31

/* some stab codes */
#define N_FUN	36
#define N_RSYM	64
#define N_SLINE	68
#define N_SO	100

/*
 * Create an endian fixup trampoline
 *
 * This starts with a "tdi 0,0,0x48" instruction which is
 * essentially a "trap never", and thus akin to a nop.
 *
 * Read with the wrong endianness, however, that opcode becomes
 * a "b . + 8".
 *
 * So essentially we use that trick to execute the following
 * trampoline in "reverse endian" if we are running with the
 * MSR_LE bit set the "wrong" way for whatever endianness the
 * kernel is built for.
 */

#ifdef CONFIG_PPC_BOOK3E
#define FIXUP_ENDIAN
#else
#define FIXUP_ENDIAN						   \
	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
	b     $+36;	  /* Skip trampoline if endian is good	*/ \
	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
	.long 0xa602487d; /* mflr r10				*/ \
	.long 0x1c004a39; /* addi r10,r10,28			*/ \
	.long 0xa600607d; /* mfmsr r11				*/ \
	.long 0x01006b69; /* xori r11,r11,1			*/ \
	.long 0xa6035a7d; /* mtsrr0 r10				*/ \
	.long 0xa6037b7d; /* mtsrr1 r11				*/ \
	.long 0x2400004c  /* rfid				*/
#endif /* !CONFIG_PPC_BOOK3E */
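
/*
 * Usage sketch (illustrative entry point name): place FIXUP_ENDIAN
 * first in an entry point that firmware or a boot loader may call in
 * either endianness:
 *
 * _GLOBAL(my_entry)
 *	FIXUP_ENDIAN
 *	...
 *
 * If MSR_LE already matches the kernel's build endianness, the tdi is
 * a nop and the branch skips the trampoline; otherwise the
 * byte-reversed instruction stream flips MSR_LE via srr1 and rfid.
 */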
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PPC_ASM_H */