/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */

/* Must be 8 bytes in size. */
#define op_t uint64_t

/* Threshold value for when to enter the unrolled loops. */
#define	OP_T_THRES	16

#if CHIP_L2_LINE_SIZE() != 64
#error "Assumes 64 byte line size"
#endif

/* How many cache lines ahead should we prefetch? */
#define PREFETCH_LINES_AHEAD 4
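/*
 * With 64-byte lines this keeps roughly 256 bytes of source data in
 * flight ahead of the copy loops; the exact depth is a latency-hiding
 * tuning choice rather than an architectural requirement.
 */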

/*
 * Provide "base versions" of load and store for the normal code path.
 * The kernel provides other versions for userspace copies.
 */
#define ST(p, v) (*(p) = (v))
#define LD(p) (*(p))
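/*
 * ST stores through whatever pointer type it is given, so e.g. ST4
 * with a uint32_t * relies on ordinary C assignment conversion to
 * truncate a wider value (such as the 64-bit 'final' word below).
 */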

#ifndef USERCOPY_FUNC
#define ST1 ST
#define ST2 ST
#define ST4 ST
#define ST8 ST
#define LD1 LD
#define LD2 LD
#define LD4 LD
#define LD8 LD
#define RETVAL dstv
void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
#else
/*
 * The special kernel versions provide implementations of the LDn/STn
 * macros that return a count of uncopied bytes when an mm fault occurs.
 */
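/*
 * (The expected usage is that a user-copy source file, e.g.
 * memcpy_user_64.c, #defines USERCOPY_FUNC and fault-handling LDn/STn
 * macros, then #includes this file; hence the #undef block at the end.)
 */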
#define RETVAL 0
int __attribute__((optimize("omit-frame-pointer")))
USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
#endif
{
	char *__restrict dst1 = (char *)dstv;
	const char *__restrict src1 = (const char *)srcv;
	const char *__restrict src1_end;
	const char *__restrict prefetch;
	op_t *__restrict dst8;    /* 8-byte pointer to destination memory. */
	op_t final; /* Final bytes to write to trailing word, if any */
	long i;

	if (n < OP_T_THRES) {
		for (; n; n--)
			ST1(dst1++, LD1(src1++));
		return RETVAL;
	}

	/*
	 * Locate the end of source memory we will copy.  Don't
	 * prefetch past this.
	 */
	src1_end = src1 + n - 1;

	/* Prefetch ahead a few cache lines, but not past the end. */
	prefetch = src1;
	for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
		__insn_prefetch(prefetch);
		prefetch += CHIP_L2_LINE_SIZE();
		prefetch = (prefetch < src1_end) ? prefetch : src1;
	}
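	/*
	 * (If we would prefetch past the end, wrap back to src1;
	 * re-prefetching data we have already touched is harmless and
	 * keeps the loop free of an early-exit check.)
	 */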

	/* Copy bytes until dst is word-aligned. */
	for (; (uintptr_t)dst1 & (sizeof(op_t) - 1); n--)
		ST1(dst1++, LD1(src1++));

	/* 8-byte pointer to destination memory. */
	dst8 = (op_t *)dst1;

	if (__builtin_expect((uintptr_t)src1 & (sizeof(op_t) - 1), 0)) {
		/* Unaligned copy. */

		op_t tmp0 = 0, tmp1 = 0, tmp2, tmp3;
		const op_t *src8 = (const op_t *) ((uintptr_t)src1 &
						   -sizeof(op_t));
		const void *srci = (void *)src1;
		int m;
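
		/*
		 * __insn_dblalign(a, b, p) merges the aligned word
		 * pair {a, b} according to the low three bits of the
		 * pointer 'p', yielding the unaligned 8 bytes that
		 * start at that offset.  All loads below therefore use
		 * aligned addresses via src8; srci exists only to
		 * supply those low bits.
		 */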
		m = (CHIP_L2_LINE_SIZE() << 2) -
			(((uintptr_t)dst8) & ((CHIP_L2_LINE_SIZE() << 2) - 1));
		m = (n < m) ? n : m;
		m /= sizeof(op_t);

		/*
		 * Copy until 'dst' reaches a four-cache-line (256-byte)
		 * boundary, which in particular makes it
		 * cache-line-aligned.
		 */
		n -= (sizeof(op_t) * m);

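		/*
		 * Duff's-device-style entry into the 4x-unrolled,
		 * software-pipelined copy loop below: pre-load the
		 * words the leftover (m % 4) iterations need, then
		 * jump into the middle of the loop so m need not be
		 * a multiple of four.
		 */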
		switch (m % 4) {
		case 0:
			if (__builtin_expect(!m, 0))
				goto _M0;
			tmp1 = LD8(src8++);
			tmp2 = LD8(src8++);
			goto _8B3;
		case 2:
			m += 2;
			tmp3 = LD8(src8++);
			tmp0 = LD8(src8++);
			goto _8B1;
		case 3:
			m += 1;
			tmp2 = LD8(src8++);
			tmp3 = LD8(src8++);
			goto _8B2;
		case 1:
			m--;
			tmp0 = LD8(src8++);
			tmp1 = LD8(src8++);
			if (__builtin_expect(!m, 0))
				goto _8B0;
		}

		do {
			tmp2 = LD8(src8++);
			tmp0 = __insn_dblalign(tmp0, tmp1, srci);
			ST8(dst8++, tmp0);
_8B3:
			tmp3 = LD8(src8++);
			tmp1 = __insn_dblalign(tmp1, tmp2, srci);
			ST8(dst8++, tmp1);
_8B2:
			tmp0 = LD8(src8++);
			tmp2 = __insn_dblalign(tmp2, tmp3, srci);
			ST8(dst8++, tmp2);
_8B1:
			tmp1 = LD8(src8++);
			tmp3 = __insn_dblalign(tmp3, tmp0, srci);
			ST8(dst8++, tmp3);
			m -= 4;
		} while (m);

_8B0:
		tmp0 = __insn_dblalign(tmp0, tmp1, srci);
		ST8(dst8++, tmp0);
		src8--;

_M0:
		if (__builtin_expect(n >= CHIP_L2_LINE_SIZE(), 0)) {
			op_t tmp4, tmp5, tmp6, tmp7, tmp8;

			prefetch = ((const char *)src8) +
				CHIP_L2_LINE_SIZE() * PREFETCH_LINES_AHEAD;

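			/*
			 * Steady state: for each 64-byte line, load
			 * eight aligned words, realign them into eight
			 * destination words with dblalign, and store
			 * them; tmp8 carries over as the next
			 * iteration's tmp0.
			 */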
			for (tmp0 = LD8(src8++); n >= CHIP_L2_LINE_SIZE();
			     n -= CHIP_L2_LINE_SIZE()) {
				/* Prefetch and advance to next line to
				   prefetch, but don't go past the end.  */
				__insn_prefetch(prefetch);

				/* Make sure prefetch got scheduled
				   earlier.  */
				__asm__ ("" : : : "memory");

				prefetch += CHIP_L2_LINE_SIZE();
				prefetch = (prefetch < src1_end) ? prefetch :
					(const char *) src8;

				tmp1 = LD8(src8++);
				tmp2 = LD8(src8++);
				tmp3 = LD8(src8++);
				tmp4 = LD8(src8++);
				tmp5 = LD8(src8++);
				tmp6 = LD8(src8++);
				tmp7 = LD8(src8++);
				tmp8 = LD8(src8++);

				tmp0 = __insn_dblalign(tmp0, tmp1, srci);
				tmp1 = __insn_dblalign(tmp1, tmp2, srci);
				tmp2 = __insn_dblalign(tmp2, tmp3, srci);
				tmp3 = __insn_dblalign(tmp3, tmp4, srci);
				tmp4 = __insn_dblalign(tmp4, tmp5, srci);
				tmp5 = __insn_dblalign(tmp5, tmp6, srci);
				tmp6 = __insn_dblalign(tmp6, tmp7, srci);
				tmp7 = __insn_dblalign(tmp7, tmp8, srci);

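				/*
				 * wh64 ("write hint 64") declares that
				 * this entire 64-byte destination line
				 * will be overwritten, letting the
				 * cache allocate it without fetching
				 * its old contents from memory.
				 */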
				__insn_wh64(dst8);

				ST8(dst8++, tmp0);
				ST8(dst8++, tmp1);
				ST8(dst8++, tmp2);
				ST8(dst8++, tmp3);
				ST8(dst8++, tmp4);
				ST8(dst8++, tmp5);
				ST8(dst8++, tmp6);
				ST8(dst8++, tmp7);

				tmp0 = tmp8;
			}
			src8--;
		}

		/* Copy any remaining 8-byte chunks. */
		if (n >= sizeof(op_t)) {
			tmp0 = LD8(src8++);
			for (; n >= sizeof(op_t); n -= sizeof(op_t)) {
				tmp1 = LD8(src8++);
				tmp0 = __insn_dblalign(tmp0, tmp1, srci);
				ST8(dst8++, tmp0);
				tmp0 = tmp1;
			}
			src8--;
		}

		if (n == 0)
			return RETVAL;

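		/*
		 * Assemble the final partial word.  The second aligned
		 * word is loaded only if it overlaps the source buffer;
		 * otherwise we might touch a word lying wholly past the
		 * end, which could fault.
		 */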
		tmp0 = LD8(src8++);
		tmp1 = ((const char *)src8 <= src1_end)
			? LD8((op_t *)src8) : 0;
		final = __insn_dblalign(tmp0, tmp1, srci);

	} else {
		/* Aligned copy. */

		const op_t *__restrict src8 = (const op_t *)src1;

		/* src8 and dst8 are both word-aligned. */
		if (n >= CHIP_L2_LINE_SIZE()) {
			/* Copy until 'dst' is cache-line-aligned. */
			for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
			     n -= sizeof(op_t))
				ST8(dst8++, LD8(src8++));

			while (n >= CHIP_L2_LINE_SIZE()) {
				op_t tmp0, tmp1, tmp2, tmp3;
				op_t tmp4, tmp5, tmp6, tmp7;

				/*
				 * Prefetch and advance to next line
				 * to prefetch, but don't go past the
				 * end.
				 */
				__insn_prefetch(prefetch);

				/* Make sure prefetch got scheduled
				   earlier.  */
				__asm__ ("" : : : "memory");

				prefetch += CHIP_L2_LINE_SIZE();
				prefetch = (prefetch < src1_end) ? prefetch :
					(const char *)src8;

				/*
				 * Do all the loads before wh64.  This
				 * is necessary if [src8, src8+7] and
				 * [dst8, dst8+7] share the same cache
				 * line and dst8 <= src8, as can be
				 * the case when called from memmove(),
				 * or by code that was only tested on
				 * x86, where memcpy happens to work
				 * for forward overlapping copies.
				 */
				tmp0 = LD8(src8++);
				tmp1 = LD8(src8++);
				tmp2 = LD8(src8++);
				tmp3 = LD8(src8++);
				tmp4 = LD8(src8++);
				tmp5 = LD8(src8++);
				tmp6 = LD8(src8++);
				tmp7 = LD8(src8++);

				/* wh64 and wait for tmp7 load completion. */
				__asm__ ("move %0, %0; wh64 %1\n"
					 : : "r"(tmp7), "r"(dst8));

				ST8(dst8++, tmp0);
				ST8(dst8++, tmp1);
				ST8(dst8++, tmp2);
				ST8(dst8++, tmp3);
				ST8(dst8++, tmp4);
				ST8(dst8++, tmp5);
				ST8(dst8++, tmp6);
				ST8(dst8++, tmp7);

				n -= CHIP_L2_LINE_SIZE();
			}
#if CHIP_L2_LINE_SIZE() != 64
# error "Fix code that assumes particular L2 cache line size."
#endif
		}

		for (; n >= sizeof(op_t); n -= sizeof(op_t))
			ST8(dst8++, LD8(src8++));

		if (__builtin_expect(n == 0, 1))
			return RETVAL;

		final = LD8(src8);
	}

	/* n != 0 if we get here.  Write out any trailing bytes. */
	dst1 = (char *)dst8;
#ifndef __BIG_ENDIAN__
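	/*
	 * Little-endian: each narrower store writes the low-order bytes
	 * of 'final', which are then shifted out so the next store sees
	 * the following bytes in its low bits.
	 */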
	if (n & 4) {
		ST4((uint32_t *)dst1, final);
		dst1 += 4;
		final >>= 32;
		n &= 3;
	}
	if (n & 2) {
		ST2((uint16_t *)dst1, final);
		dst1 += 2;
		final >>= 16;
		n &= 1;
	}
	if (n)
		ST1((uint8_t *)dst1, final);
#else
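	/*
	 * Big-endian: the trailing bytes sit at the high-order end of
	 * 'final'; when a store is skipped, 'final' is shifted so the
	 * next narrower store still finds its bytes at the same bit
	 * positions.
	 */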
	if (n & 4) {
		ST4((uint32_t *)dst1, final >> 32);
		dst1 += 4;
	} else {
		final >>= 32;
	}
	if (n & 2) {
		ST2((uint16_t *)dst1, final >> 16);
		dst1 += 2;
	} else {
		final >>= 16;
	}
	if (n & 1)
		ST1((uint8_t *)dst1, final >> 8);
#endif

	return RETVAL;
}

#ifdef USERCOPY_FUNC
#undef ST1
#undef ST2
#undef ST4
#undef ST8
#undef LD1
#undef LD2
#undef LD4
#undef LD8
#undef USERCOPY_FUNC
#endif