#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__

/* bytes per L1 cache line */
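/* MAX_COPY_PREFETCH: how many cache lines the copy routines may prefetch ahead */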
#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH	4
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT		7
#else
#define L1_CACHE_SHIFT		5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#endif

#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

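/* Cache line size assumed when aligning data shared between CPUs (avoids false sharing) */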
#define	SMP_CACHE_BYTES		L1_CACHE_BYTES

#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
struct ppc64_caches {
	u32	dsize;			/* L1 d-cache size */
	u32	dline_size;		/* L1 d-cache line size */
	u32	log_dline_size;		/* log2(dline_size) */
	u32	dlines_per_page;	/* PAGE_SIZE / dline_size */
	u32	isize;			/* L1 i-cache size */
	u32	iline_size;		/* L1 i-cache line size */
	u32	log_iline_size;		/* log2(iline_size) */
	u32	ilines_per_page;	/* PAGE_SIZE / iline_size */
};

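/* Populated early in boot from the device tree (see setup_64.c). */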
extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */

#if defined(__ASSEMBLY__)
/*
 * For a snooping icache, we still need a dummy icbi to purge all the
 * prefetched instructions from the ifetch buffers. We also need a sync
 * before the icbi to order the actual stores to memory that might
 * have modified instructions with the icbi.
 */
#define PURGE_PREFETCHED_INS	\
	sync;			\
	icbi	0,r3;		\
	sync;			\
	isync

#else
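/*
 * __read_mostly places rarely-written data in .data..read_mostly so it
 * does not share cache lines with frequently written data.
 */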
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

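/*
 * L2/L3 cache control register (L2CR/L3CR) accessors for 6xx-class
 * CPUs (implemented in l2cr_6xx.S); no-ops everywhere else.
 */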
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */