/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <arch/chip.h>


/*
 * This file shares the implementation of the userspace memcpy and
 * the kernel's memcpy, copy_to_user and copy_from_user.
 */

#include <linux/linkage.h>

#define IS_MEMCPY	  0
#define IS_COPY_FROM_USER  1
#define IS_COPY_FROM_USER_ZEROING  2
#define IS_COPY_TO_USER   -1
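
/* These values are chosen to make the mode tests cheap: zero means
 * plain memcpy (tested with mz/bzt on r29), the one negative value
 * means copy_to_user (blzt), and the low bit distinguishes the plain
 * and zeroing copy_from_user variants (bbs in the fixup code).
 */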

	.section .text.memcpy_common, "ax"
	.align 64

/* Use this to preface each bundle that can cause an exception so
 * the kernel can clean up properly. The special cleanup code should
 * not use these, since it knows what it is doing.
 */
#define EX \
	.pushsection __ex_table, "a"; \
	.align 4; \
	.word 9f, memcpy_common_fixup; \
	.popsection; \
	9
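
/* Each EX-prefixed bundle gets a "9" local label, and the macro above
 * records { bundle address, memcpy_common_fixup } in __ex_table. On a
 * page fault the trap handler searches that table for the faulting PC
 * and, on a hit, resumes execution at the fixup address instead of
 * treating the fault as fatal. A hedged C sketch of that search (the
 * names are illustrative, not the exact kernel types):
 *
 *	struct ex_entry { unsigned long insn, fixup; };
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct ex_entry *e;
 *		for (e = ex_table_start; e < ex_table_end; e++)
 *			if (e->insn == regs->pc) {
 *				regs->pc = e->fixup;	// run fixup code
 *				return 1;		// fault handled
 *			}
 *		return 0;	// genuinely bad access
 *	}
 */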


/* __copy_from_user_inatomic takes the kernel target address in r0,
 * the user source in r1, and the bytes to copy in r2.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 */
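/* Seen from C, this entry point corresponds roughly to
 *
 *	unsigned long __copy_from_user_inatomic(void *to,
 *			const void __user *from, unsigned long n);
 *
 * (a sketch; the authoritative prototype lives in the uaccess headers).
 */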
ENTRY(__copy_from_user_inatomic)
.type __copy_from_user_inatomic, @function
	FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
	  .text.memcpy_common, \
	  .Lend_memcpy_common - __copy_from_user_inatomic)
	{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
	.size __copy_from_user_inatomic, . - __copy_from_user_inatomic

/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
 * any uncopiable bytes are zeroed in the target.
 */
ENTRY(__copy_from_user_zeroing)
.type __copy_from_user_zeroing, @function
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
	.size __copy_from_user_zeroing, . - __copy_from_user_zeroing

/* __copy_to_user_inatomic takes the user target address in r0,
 * the kernel source in r1, and the bytes to copy in r2.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 */
ENTRY(__copy_to_user_inatomic)
.type __copy_to_user_inatomic, @function
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	{ movei r29, IS_COPY_TO_USER; j memcpy_common }
	.size __copy_to_user_inatomic, . - __copy_to_user_inatomic

ENTRY(memcpy)
.type memcpy, @function
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	{ movei r29, IS_MEMCPY }
	.size memcpy, . - memcpy
	/* Fall through */

	.type memcpy_common, @function
memcpy_common:
	/* On entry, r29 holds one of the IS_* macro values from above. */


	/* r0 is the dest, r1 is the source, r2 is the size. */

	/* Save aside original dest so we can return it at the end. */
	{ sw sp, lr; move r23, r0; or r4, r0, r1 }

	/* Check for an empty size. */
	{ bz r2, .Ldone; andi r4, r4, 3 }

	/* Save aside original values in case of a fault. */
	{ move r24, r1; move r25, r2 }
	move r27, lr

	/* Check for an unaligned source or dest. */
	{ bnz r4, .Lcopy_unaligned_maybe_many; addli r4, r2, -256 }

.Lcheck_aligned_copy_size:
	/* If we are copying < 256 bytes, branch to simple case. */
	{ blzt r4, .Lcopy_8_check; slti_u r8, r2, 8 }

	/* Copying >= 256 bytes, so jump to complex prefetching loop. */
	{ andi r6, r1, 63; j .Lcopy_many }

/*
 *
 * Aligned 4 byte at a time copy loop
 *
 */
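
/* Roughly equivalent C for the loop below (a sketch, assuming
 * word-aligned src/dst; the real code computes each exit test one
 * bundle ahead of where C would):
 *
 *	while (n >= 8) {
 *		uint32_t a = *src++;	// two loads back to back,
 *		uint32_t b = *src++;	// hiding the load latency
 *		*dst++ = a;
 *		*dst++ = b;
 *		n -= 8;
 *	}
 */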

.Lcopy_8_loop:
	/* Copy two words at a time to hide load latency. */
EX:	{ lw r3, r1; addi r1, r1, 4; slti_u r8, r2, 16 }
EX:	{ lw r4, r1; addi r1, r1, 4 }
EX:	{ sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
EX:	{ sw r0, r4; addi r0, r0, 4; addi r2, r2, -4 }
.Lcopy_8_check:
	{ bzt r8, .Lcopy_8_loop; slti_u r4, r2, 4 }

	/* Copy odd leftover word, if any. */
	{ bnzt r4, .Lcheck_odd_stragglers }
EX:	{ lw r3, r1; addi r1, r1, 4 }
EX:	{ sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }

.Lcheck_odd_stragglers:
	{ bnz r2, .Lcopy_unaligned_few }

.Ldone:
	/* For memcpy return original dest address, else zero. */
	{ mz r0, r29, r23; jrp lr }


/*
 *
 * Prefetching multiple cache line copy handler (for large transfers).
 *
 */
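
/* Hedged C pseudocode for the shape of this path (prefetch and wh64
 * stand in for the machine operations; the real code also
 * software-pipelines the loads across three lines at a time):
 *
 *	while ((uintptr_t)src & 63)
 *		copy_word();			// align src to a cache line
 *	while (n >= 64) {
 *		prefetch(src + 128 + 60);	// stay ~3 lines ahead
 *		wh64(dst);			// claim dst line without fetching it
 *		copy_64_bytes(dst, src);
 *		dst += 64; src += 64; n -= 64;
 *	}
 */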

	/* Copy words until r1 is cache-line-aligned. */
.Lalign_loop:
EX:	{ lw r3, r1; addi r1, r1, 4 }
	{ andi r6, r1, 63 }
EX:	{ sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
.Lcopy_many:
	{ bnzt r6, .Lalign_loop; addi r9, r0, 63 }

	{ addi r3, r1, 60; andi r9, r9, -64 }

	/* No need to prefetch dst, we'll just do the wh64
	 * right before we copy a line.
	 */
EX:	{ lw r5, r3; addi r3, r3, 64; movei r4, 1 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, .; move r27, lr }
EX:	{ lw r6, r3; addi r3, r3, 64 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
EX:	{ lw r7, r3; addi r3, r3, 64 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bz zero, .Lbig_loop2 }

	/* On entry to this loop:
	 * - r0 points to the start of dst line 0
	 * - r1 points to start of src line 0
	 * - r2 >= (256 - 60), only the first time the loop trips.
	 * - r3 contains r1 + 128 + 60    [pointer to end of source line 2]
	 *   This is our prefetch address. When we get near the end
	 *   rather than prefetching off the end this is changed to point
	 *   to some "safe" recently loaded address.
	 * - r5 contains *(r1 + 60)       [i.e. last word of source line 0]
	 * - r6 contains *(r1 + 64 + 60)  [i.e. last word of source line 1]
	 * - r9 contains ((r0 + 63) & -64)
	 *     [start of next dst cache line.]
	 */
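
/* Note the software pipelining: while line N is being copied, a single
 * load through r3 touches the last word of line N+3, pulling that whole
 * line toward the cache so its miss latency overlaps the sixteen stores
 * of the copy.
 */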

.Lbig_loop:
	{ jal .Lcopy_line2; add r15, r1, r2 }

.Lbig_loop2:
	/* Copy line 0, first stalling until r5 is ready. */
EX:	{ move r12, r5; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
	/* Prefetch several lines ahead. */
EX:	{ lw r5, r3; addi r3, r3, 64 }
	{ jal .Lcopy_line }

	/* Copy line 1, first stalling until r6 is ready. */
EX:	{ move r12, r6; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
	/* Prefetch several lines ahead. */
EX:	{ lw r6, r3; addi r3, r3, 64 }
	{ jal .Lcopy_line }

	/* Copy line 2, first stalling until r7 is ready. */
EX:	{ move r12, r7; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
	/* Prefetch several lines ahead. */
EX:	{ lw r7, r3; addi r3, r3, 64 }
	/* Use up a caches-busy cycle by jumping back to the top of the
	 * loop. Might as well get it out of the way now.
	 */
	{ j .Lbig_loop }


	/* On entry:
	 * - r0 points to the destination line.
	 * - r1 points to the source line.
	 * - r3 is the next prefetch address.
	 * - r9 holds the last address used for wh64.
	 * - r12 = WORD_15
	 * - r16 = WORD_0.
	 * - r17 == r1 + 16.
	 * - r27 holds saved lr to restore.
	 *
	 * On exit:
	 * - r0 is incremented by 64.
	 * - r1 is incremented by 64, unless that would point to a word
	 *   beyond the end of the source array, in which case it is redirected
	 *   to point to an arbitrary word already in the cache.
	 * - r2 is decremented by 64.
	 * - r3 is unchanged, unless it points to a word beyond the
	 *   end of the source array, in which case it is redirected
	 *   to point to an arbitrary word already in the cache.
	 *   Redirecting is OK since if we are that close to the end
	 *   of the array we will not come back to this subroutine
	 *   and use the contents of the prefetched address.
	 * - r4 is nonzero iff r2 >= 64.
	 * - r9 is incremented by 64, unless it points beyond the
	 *   end of the last full destination cache line, in which
	 *   case it is redirected to a "safe address" that can be
	 *   clobbered (sp - 64)
	 * - lr contains the value in r27.
	 */

/* r26 unused */

.Lcopy_line:
	/* TODO: when r3 goes past the end, we would like to redirect it
	 * to prefetch the last partial cache line (if any) just once, for the
	 * benefit of the final cleanup loop. But we don't want to
	 * prefetch that line more than once, or subsequent prefetches
	 * will go into the RTF. But then .Lbig_loop should unconditionally
	 * branch to top of loop to execute final prefetch, and its
	 * nop should become a conditional branch.
	 */

	/* We need two non-memory cycles here to cover the resources
	 * used by the loads initiated by the caller.
	 */
	{ add r15, r1, r2 }
.Lcopy_line2:
	{ slt_u r13, r3, r15; addi r17, r1, 16 }

	/* NOTE: this will stall for one cycle as L1 is busy. */

	/* Fill second L1D line. */
EX:	{ lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */

	/* Prepare destination line for writing. */
EX:	{ wh64 r9; addi r9, r9, 64 }
	/* Load seven words that are L1D hits to cover wh64 L2 usage. */

	/* Load the three remaining words from the last L1D line, which
	 * we know has already filled the L1D.
	 */
EX:	{ lw r4, r1;  addi r1, r1, 4;   addi r20, r1, 16 }   /* r4 = WORD_12 */
EX:	{ lw r8, r1;  addi r1, r1, 4;   slt_u r13, r20, r15 }/* r8 = WORD_13 */
EX:	{ lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 }  /* r11 = WORD_14 */

	/* Load the three remaining words from the first L1D line, first
	 * stalling until it has filled by "looking at" r16.
	 */
EX:	{ lw r13, r1; addi r1, r1, 4; move zero, r16 }   /* r13 = WORD_1 */
EX:	{ lw r14, r1; addi r1, r1, 4 }                   /* r14 = WORD_2 */
EX:	{ lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */

	/* Load second word from the second L1D line, first
	 * stalling until it has filled by "looking at" r17.
	 */
EX:	{ lw r19, r1; addi r1, r1, 4; move zero, r17 }  /* r19 = WORD_5 */

	/* Store last word to the destination line, potentially dirtying it
	 * for the first time, which keeps the L2 busy for two cycles.
	 */
EX:	{ sw r10, r12 }                                 /* store(WORD_15) */

	/* Use two L1D hits to cover the sw L2 access above. */
EX:	{ lw r10, r1; addi r1, r1, 4 }                  /* r10 = WORD_6 */
EX:	{ lw r12, r1; addi r1, r1, 4 }                  /* r12 = WORD_7 */

	/* Fill third L1D line. */
EX:	{ lw r18, r1; addi r1, r1, 4 }                  /* r18 = WORD_8 */

	/* Store first L1D line. */
EX:	{ sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
EX:	{ sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
EX:	{ sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
EX:	{ sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
	/* Store second L1D line. */
EX:	{ sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
EX:	{ sw r0, r19; addi r0, r0, 4 }                  /* store(WORD_5) */
EX:	{ sw r0, r10; addi r0, r0, 4 }                  /* store(WORD_6) */
EX:	{ sw r0, r12; addi r0, r0, 4 }                  /* store(WORD_7) */

EX:	{ lw r13, r1; addi r1, r1, 4; move zero, r18 }  /* r13 = WORD_9 */
EX:	{ lw r14, r1; addi r1, r1, 4 }                  /* r14 = WORD_10 */
EX:	{ lw r15, r1; move r1, r20   }                  /* r15 = WORD_11 */

	/* Store third L1D line. */
EX:	{ sw r0, r18; addi r0, r0, 4 }                  /* store(WORD_8) */
EX:	{ sw r0, r13; addi r0, r0, 4 }                  /* store(WORD_9) */
EX:	{ sw r0, r14; addi r0, r0, 4 }                  /* store(WORD_10) */
EX:	{ sw r0, r15; addi r0, r0, 4 }                  /* store(WORD_11) */

	/* Store rest of fourth L1D line. */
EX:	{ sw r0, r4;  addi r0, r0, 4 }                  /* store(WORD_12) */
	{
EX:	sw r0, r8                                       /* store(WORD_13) */
	addi r0, r0, 4
	/* Will r2 be >= 64 after we subtract 64 below? */
	shri r4, r2, 7
	}
	{
EX:	sw r0, r11                                      /* store(WORD_14) */
	addi r0, r0, 8
	/* Record 64 bytes successfully copied. */
	addi r2, r2, -64
	}

	{ jrp lr; move lr, r27 }

	/* Convey to the backtrace library that the stack frame is size
	 * zero, and the real return address is on the stack rather than
	 * in 'lr'.
	 */
	{ info 8 }

	.align 64
.Lcopy_unaligned_maybe_many:
	/* Skip the setup overhead if we aren't copying many bytes. */
	{ slti_u r8, r2, 20; sub r4, zero, r0 }
	{ bnzt r8, .Lcopy_unaligned_few; andi r4, r4, 3 }
	{ bz r4, .Ldest_is_word_aligned; add r18, r1, r2 }

/*
 *
 * unaligned 4 byte at a time copy handler.
 *
 */
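
/* Stores in this path merge two adjacent aligned source words with
 * dword_align so that every write is a full aligned word. A hedged C
 * model of that merge (little-endian; 's' is eight times the low two
 * bits of the source pointer):
 *
 *	static uint32_t merge(uint32_t w0, uint32_t w1, unsigned s)
 *	{
 *		return s ? (w0 >> s) | (w1 << (32 - s)) : w0;
 *	}
 */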

	/* Copy single bytes until r0 == 0 mod 4, so we can store words. */
.Lalign_dest_loop:
EX:	{ lb_u r3, r1; addi r1, r1, 1; addi r4, r4, -1 }
EX:	{ sb r0, r3;   addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r4, .Lalign_dest_loop; andi r3, r1, 3 }

	/* If source and dest are now *both* aligned, do an aligned copy. */
	{ bz r3, .Lcheck_aligned_copy_size; addli r4, r2, -256 }

.Ldest_is_word_aligned:

EX:	{ andi r8, r0, 63; lwadd_na r6, r1, 4}
	{ slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned }

	/* This copies unaligned words until either there are fewer
	 * than 4 bytes left to copy, or until the destination pointer
	 * is cache-aligned, whichever comes first.
	 *
	 * On entry:
	 * - r0 is the next store address.
	 * - r1 points 4 bytes past the load address corresponding to r0.
	 * - r2 >= 4
	 * - r6 is the next aligned word loaded.
	 */
.Lcopy_unaligned_src_words:
EX:	{ lwadd_na r7, r1, 4; slti_u r8, r2, 4 + 4 }
	/* stall */
	{ dword_align r6, r7, r1; slti_u r9, r2, 64 + 4 }
EX:	{ swadd r0, r6, 4; addi r2, r2, -4 }
	{ bnz r8, .Lcleanup_unaligned_words; andi r8, r0, 63 }
	{ bnzt r8, .Lcopy_unaligned_src_words; move r6, r7 }

	/* On entry:
	 * - r0 is the next store address.
	 * - r1 points 4 bytes past the load address corresponding to r0.
	 * - r2 >= 4 (# of bytes left to store).
	 * - r6 is the next aligned src word value.
	 * - r9 = (r2 < 64U).
	 * - r18 points one byte past the end of source memory.
	 */
.Ldest_is_L2_line_aligned:

	{
	/* Less than a full cache line remains. */
	bnz r9, .Lcleanup_unaligned_words
	move r7, r6
	}

	/* r2 >= 64 */

	/* Kick off two prefetches, but don't go past the end. */
	{ addi r3, r1, 63 - 4; addi r8, r1, 64 + 63 - 4 }
	{ prefetch r3; move r3, r8; slt_u r8, r8, r18 }
	{ mvz r3, r8, r1; addi r8, r3, 64 }
	{ prefetch r3; move r3, r8; slt_u r8, r8, r18 }
	{ mvz r3, r8, r1; movei r17, 0 }

.Lcopy_unaligned_line:
	/* Prefetch another line. */
	{ prefetch r3; addi r15, r1, 60; addi r3, r3, 64 }
	/* Fire off a load of the last word we are about to copy. */
EX:	{ lw_na r15, r15; slt_u r8, r3, r18 }

EX:	{ mvz r3, r8, r1; wh64 r0 }

	/* This loop runs twice.
	 *
	 * On entry:
	 * - r17 is even before the first iteration, and odd before
	 *   the second.  It is incremented inside the loop.  Encountering
	 *   an even value at the end of the loop makes it stop.
	 */
.Lcopy_half_an_unaligned_line:
EX:	{
	/* Stall until the last byte is ready. In the steady state this
	 * guarantees all words to load below will be in the L2 cache, which
	 * avoids shunting the loads to the RTF.
	 */
	move zero, r15
	lwadd_na r7, r1, 16
	}
EX:	{ lwadd_na r11, r1, 12 }
EX:	{ lwadd_na r14, r1, -24 }
EX:	{ lwadd_na r8, r1, 4 }
EX:	{ lwadd_na r9, r1, 4 }
EX:	{
	lwadd_na r10, r1, 8
	/* r16 = (r2 < 64), after we subtract 32 from r2 below. */
	slti_u r16, r2, 64 + 32
	}
EX:	{ lwadd_na r12, r1, 4; addi r17, r17, 1 }
EX:	{ lwadd_na r13, r1, 8; dword_align r6, r7, r1 }
EX:	{ swadd r0, r6,  4; dword_align r7,  r8,  r1 }
EX:	{ swadd r0, r7,  4; dword_align r8,  r9,  r1 }
EX:	{ swadd r0, r8,  4; dword_align r9,  r10, r1 }
EX:	{ swadd r0, r9,  4; dword_align r10, r11, r1 }
EX:	{ swadd r0, r10, 4; dword_align r11, r12, r1 }
EX:	{ swadd r0, r11, 4; dword_align r12, r13, r1 }
EX:	{ swadd r0, r12, 4; dword_align r13, r14, r1 }
EX:	{ swadd r0, r13, 4; addi r2, r2, -32 }
	{ move r6, r14; bbst r17, .Lcopy_half_an_unaligned_line }

	{ bzt r16, .Lcopy_unaligned_line; move r7, r6 }

	/* On entry:
	 * - r0 is the next store address.
	 * - r1 points 4 bytes past the load address corresponding to r0.
	 * - r2 >= 0 (# of bytes left to store).
	 * - r7 is the next aligned src word value.
	 */
.Lcleanup_unaligned_words:
	/* Handle any trailing bytes. */
	{ bz r2, .Lcopy_unaligned_done; slti_u r8, r2, 4 }
	{ bzt r8, .Lcopy_unaligned_src_words; move r6, r7 }

	/* Move r1 back to the point where it corresponds to r0. */
	{ addi r1, r1, -4 }

	/* Fall through */

/*
 *
 * 1 byte at a time copy handler.
 *
 */

.Lcopy_unaligned_few:
EX:	{ lb_u r3, r1; addi r1, r1, 1 }
EX:	{ sb r0, r3;   addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r2, .Lcopy_unaligned_few }

.Lcopy_unaligned_done:

	/* For memcpy return original dest address, else zero. */
	{ mz r0, r29, r23; jrp lr }

.Lend_memcpy_common:
	.size memcpy_common, .Lend_memcpy_common - memcpy_common

	.section .fixup,"ax"
memcpy_common_fixup:
	.type memcpy_common_fixup, @function

	/* Skip any bytes we already successfully copied.
	 * r2 (num remaining) is correct, but r0 (dst) and r1 (src)
	 * may not be quite right because of unrolling and prefetching.
	 * So we need to recompute their values as the address just
	 * after the last byte we are sure was successfully loaded and
	 * then stored.
	 */
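
/* In C terms the recomputation below is simply (a sketch, using the
 * values saved on entry to memcpy_common):
 *
 *	copied = orig_n - n;		// r3 = r25 - r2
 *	dst = orig_dst + copied;	// r0 = r23 + r3
 *	src = orig_src + copied;	// r1 = r24 + r3
 */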

	/* Determine how many bytes we successfully copied. */
	{ sub r3, r25, r2 }

	/* Add this to the original r0 and r1 to get their new values. */
	{ add r0, r23, r3; add r1, r24, r3 }

	{ bzt r29, memcpy_fixup_loop }
	{ blzt r29, copy_to_user_fixup_loop }

copy_from_user_fixup_loop:
	/* Try copying the rest one byte at a time, expecting a load fault. */
.Lcfu:	{ lb_u r3, r1; addi r1, r1, 1 }
	{ sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r2, copy_from_user_fixup_loop }

.Lcopy_from_user_fixup_zero_remainder:
	{ bbs r29, 2f }  /* low bit set means IS_COPY_FROM_USER */
	/* byte-at-a-time loop faulted, so zero the rest. */
	{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
1:      { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
	{ bnzt r3, 1b }
2:	move lr, r27
	{ move r0, r2; jrp lr }

copy_to_user_fixup_loop:
	/* Try copying the rest one byte at a time, expecting a store fault. */
	{ lb_u r3, r1; addi r1, r1, 1 }
.Lctu:	{ sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r2, copy_to_user_fixup_loop }
.Lcopy_to_user_fixup_done:
	move lr, r27
	{ move r0, r2; jrp lr }

memcpy_fixup_loop:
	/* Try copying the rest one byte at a time. We expect a disastrous
	 * fault to happen since we are in fixup code, but let it happen.
	 */
	{ lb_u r3, r1; addi r1, r1, 1 }
	{ sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r2, memcpy_fixup_loop }
	/* This should be unreachable, we should have faulted again.
	 * But be paranoid and handle it in case some interrupt changed
	 * the TLB or something.
	 */
	move lr, r27
	{ move r0, r23; jrp lr }

	.size memcpy_common_fixup, . - memcpy_common_fixup

	.section __ex_table,"a"
	.align 4
	.word .Lcfu, .Lcopy_from_user_fixup_zero_remainder
	.word .Lctu, .Lcopy_to_user_fixup_done
560