/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>
#include <linux/export.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;

/*
 * Dual-use variable.
 * Used during startup: set to non-zero if the VFP checks fail.
 * After startup, holds the VFP architecture version.
 */
unsigned int VFP_arch;

/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 *
 * For UP, this is sufficient to tell which thread owns the VFP context.
 * However, for SMP, we also need to check the CPU number stored in the
 * saved state to catch migrations.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];

/*
 * Is 'thread's most up to date state stored in this CPU's hardware?
 * Must be called from non-preemptible context.
 */
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
	if (thread->vfpstate.hard.cpu != cpu)
		return false;
#endif
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}

/*
 * Force a reload of the VFP context from the thread structure.  We do
 * this by ensuring that access to the VFP hardware is disabled, and
 * clear vfp_current_hw_state.  Must be called from non-preemptible context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
	if (vfp_state_in_hw(cpu, thread)) {
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
		vfp_current_hw_state[cpu] = NULL;
	}
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}

/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of vfp_current_hw_state[] and hardware
	 * disable are done for the same CPU and without preemption.
	 *
	 * Do this first to ensure that preemption won't overwrite our
	 * state saving should access to the VFP be enabled at this point.
	 */
	cpu = get_cpu();
	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
#ifdef CONFIG_SMP
	vfp->hard.cpu = NR_CPUS;
#endif
}

static void vfp_thread_exit(struct thread_info *thread)
{
	/* release case: Per-thread VFP cleanup. */
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	put_cpu();
}

static void vfp_thread_copy(struct thread_info *thread)
{
	struct thread_info *parent = current_thread_info();

	vfp_sync_hwstate(parent);
	thread->vfpstate = parent->vfpstate;
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}

/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};

/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}

static void vfp_panic(char *reason, u32 inst)
{
	int i;

	pr_err("VFP: Error: %s\n", reason);
	pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat,en,sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;

	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);
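
	/*
	 * Worked example (illustrative): if a bounce leaves both FPSCR_IXC
	 * and FPSCR_IOC set while their trap enables (FPSCR_IXE, FPSCR_IOE)
	 * are also set, each matching RAISE() simply overwrites si_code,
	 * so the highest-priority (last-checked) condition wins and the
	 * signal is delivered as FPE_FLTINV.
	 */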

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction can not appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not
			 * have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction can not appear in FPINST2, nor can
		 * it cause an exception.  Therefore, we do not have to
		 * emulate it.
		 */
	}
	return exceptions & ~VFP_NAN_FLAG;
}

/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, fpsid, exceptions;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * At this point, FPEXC can have the following configuration:
	 *
	 *  EX DEX IXE
	 *  0   1   x   - synchronous exception
	 *  1   x   0   - asynchronous exception
	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
	 *                implementation), undefined otherwise
	 *
	 * Clear various bits and enable access to the VFP so we can
	 * handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

	fpsid = fmrx(FPSID);
	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
	 */
	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
	    && (fpscr & FPSCR_IXE)) {
		/*
		 * Synchronous exception, emulate the trigger instruction
		 */
		goto emulate;
	}

	if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
		/*
		 * Asynchronous exception. The instruction is read from FPINST
		 * and the interrupted instruction has to be restarted.
		 */
		trigger = fmrx(FPINST);
		regs->ARM_pc -= 4;
#endif
	} else if (!(fpexc & FPEXC_DEX)) {
		/*
		 * Illegal combination of bits. It can be caused by an
		 * unallocated VFP instruction but with FPSCR.IXE set and not
		 * on VFP subarch 1.
		 */
		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
		goto exit;
	}

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
	 * whether FPEXC.VECITR or FPSCR.LEN is used.
	 */
	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction.  We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		goto exit;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
	preempt_enable();
}

static void vfp_enable(void *unused)
{
	u32 access;

	BUG_ON(preemptible());
	access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11)
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}

/* Called by platforms on which we want to disable VFP because it may not be
 * present on all CPUs within an SMP complex. Needs to be called prior to
 * vfp_init().
 */
void vfp_disable(void)
{
	if (VFP_arch) {
		pr_debug("%s: should be called prior to vfp_init\n", __func__);
		return;
	}
	VFP_arch = 1;
}

#ifdef CONFIG_CPU_PM
static int vfp_pm_suspend(void)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		pr_debug("%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	} else if (vfp_current_hw_state[ti->cpu]) {
#ifndef CONFIG_SMP
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
		fmxr(FPEXC, fpexc);
#endif
	}

	/* clear any information we had about last context state */
	vfp_current_hw_state[ti->cpu] = NULL;

	return 0;
}

static void vfp_pm_resume(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}

static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
	void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		vfp_pm_suspend();
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		vfp_pm_resume();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block vfp_cpu_pm_notifier_block = {
	.notifier_call = vfp_cpu_pm_notifier,
};

static void vfp_pm_init(void)
{
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

/*
 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
 * with the hardware state.
 */
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	if (vfp_state_in_hw(cpu, thread)) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	put_cpu();
}

/* Ensure that the thread reloads the hardware VFP state on the next use. */
void vfp_flush_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	vfp_force_reload(cpu, thread);

	put_cpu();
}

/*
 * Save the current VFP state into the provided structures and prepare
 * for entry into a new function (signal handler).
 */
int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
				    struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	int err = 0;

	/* Ensure that the saved hwstate is up-to-date. */
	vfp_sync_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
			      sizeof(hwstate->fpregs));
	/*
	 * Copy the status and control register.
	 */
	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);

	/*
	 * Copy the exception registers.
	 */
	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	if (err)
		return -EFAULT;

	/* Ensure that VFP is disabled. */
	vfp_flush_hwstate(thread);

	/*
	 * As per the PCS, clear the length and stride bits for function
	 * entry.
	 */
	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
	return 0;
}

/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
			     struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	unsigned long fpexc;
	int err = 0;

	/* Disable VFP to avoid corrupting the new thread state. */
	vfp_flush_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
				sizeof(hwstate->fpregs));
	/*
	 * Copy the status and control register.
	 */
	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);

	/*
	 * Sanitise and restore the exception registers.
	 */
	__get_user_error(fpexc, &ufp_exc->fpexc, err);

	/* Ensure the VFP is enabled. */
	fpexc |= FPEXC_EN;

	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
	hwstate->fpexc = fpexc;

	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	return err ? -EFAULT : 0;
}

/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN)
		vfp_current_hw_state[(long)hcpu] = NULL;
	else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		vfp_enable(NULL);
	return NOTIFY_OK;
}

void vfp_kmode_exception(void)
{
	/*
	 * If we reach this point, a floating point exception has been raised
	 * while running in kernel mode. If the NEON/VFP unit was enabled at the
	 * time, it means a VFP instruction has been issued that requires
	 * software assistance to complete, something which is not currently
	 * supported in kernel mode.
	 * If the NEON/VFP unit was disabled, and the location pointed to below
	 * is properly preceded by a call to kernel_neon_begin(), something has
	 * caused the task to be scheduled out and back in again. In this case,
	 * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
	 * be helpful in localizing the problem.
	 */
	if (fmrx(FPEXC) & FPEXC_EN)
		pr_crit("BUG: unsupported FP instruction in kernel mode\n");
	else
		pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
}

#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */
void kernel_neon_begin(void)
{
	struct thread_info *thread = current_thread_info();
	unsigned int cpu;
	u32 fpexc;

	/*
	 * Kernel mode NEON is only allowed outside of interrupt context
	 * with preemption disabled. This will make sure that the kernel
	 * mode NEON register contents never need to be preserved.
	 */
	BUG_ON(in_interrupt());
	cpu = get_cpu();

	fpexc = fmrx(FPEXC) | FPEXC_EN;
	fmxr(FPEXC, fpexc);

	/*
	 * Save the userland NEON/VFP state. Under UP,
	 * the owner could be a task other than 'current'
	 */
	if (vfp_state_in_hw(cpu, thread))
		vfp_save_state(&thread->vfpstate, fpexc);
#ifndef CONFIG_SMP
	else if (vfp_current_hw_state[cpu] != NULL)
		vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
	vfp_current_hw_state[cpu] = NULL;
}
EXPORT_SYMBOL(kernel_neon_begin);

void kernel_neon_end(void)
{
	/* Disable the NEON/VFP unit. */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();
}
EXPORT_SYMBOL(kernel_neon_end);
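
/*
 * Illustrative usage sketch (not part of this file; do_neon_copy() is a
 * hypothetical NEON-accelerated routine): callers must bracket any kernel
 * mode NEON/VFP use with this pair, and must not sleep in between, since
 * kernel_neon_begin() disables preemption via get_cpu():
 *
 *	kernel_neon_begin();
 *	do_neon_copy(dst, src, len);
 *	kernel_neon_end();
 */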

#endif /* CONFIG_KERNEL_MODE_NEON */

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;
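
	/*
	 * If no VFP unit is present, the FPSID read above undefs into
	 * vfp_testing_entry, which leaves VFP_arch non-zero (see the
	 * "dual-use variable" comment above), so the check below reports
	 * the VFP as not present.
	 */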

	pr_info("VFP support v0.3: ");
	if (VFP_arch) {
		pr_cont("not present\n");
		return 0;
	/* Extract the architecture on CPUID scheme */
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK;
		VFP_arch >>= FPSID_ARCH_BIT;
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if (IS_ENABLED(CONFIG_NEON) &&
		   (fmrx(MVFR1) & 0x000fff00) == 0x00011100)
			elf_hwcap |= HWCAP_NEON;

		if (IS_ENABLED(CONFIG_VFPv3)) {
			u32 mvfr0 = fmrx(MVFR0);
			if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 ||
			    ((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) {
				elf_hwcap |= HWCAP_VFPv3;
				/*
				 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
				 * this configuration only have 16 x 64bit
				 * registers.
				 */
				if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1)
					/* also v4-D16 */
					elf_hwcap |= HWCAP_VFPv3D16;
				else
					elf_hwcap |= HWCAP_VFPD32;
			}

			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
		}
	/* Extract the architecture version on pre-cpuid scheme */
	} else {
		if (vfpsid & FPSID_NODOUBLE) {
			pr_cont("no double precision support\n");
			return 0;
		}

		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
	}

	hotcpu_notifier(vfp_hotplug, 0);

	vfp_vector = vfp_support_entry;

	thread_register_notifier(&vfp_notifier_block);
	vfp_pm_init();

	/*
	 * We detected VFP, and the support code is
	 * in place; report VFP support to userspace.
	 */
	elf_hwcap |= HWCAP_VFP;

	pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
		(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
		VFP_arch,
		(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
		(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
		(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

	return 0;
}

core_initcall(vfp_init);