/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/* Check if the core is loaded, if not, too hard */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		this_icp->rm_action |= XICS_RM_KICK_VCPU;
		this_icp->rm_kick_target = vcpu;
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}
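
/*
 * For reference, every caller of icp_rm_try_update() drives it from the
 * same lock-free retry loop; a minimal sketch of the pattern, with the
 * state-mutation step elided:
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... compute new_state from old_state ...
 *	} while (!icp_rm_try_update(icp, old_state, new_state));
 *
 * The loop repeats whenever the cmpxchg64 loses a race, so the
 * state-mutation step must be safe to re-execute.
 */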

static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

static void icp_rm_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}
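
/*
 * Note: the smp_rmb() above pairs with the smp_mb() in
 * icp_rm_deliver_irq(): the setter orders its resend_map update before
 * re-checking need_resend, and we order the need_resend test before
 * scanning resend_map, so the two sides cannot both miss each other's
 * update when they race.
 */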

static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well.
	 */
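
	/*
	 * A concrete example of the reject path: assume irq A (priority
	 * 5) is pending in the ICP when irq B (priority 3, more favored)
	 * is delivered. B displaces A, icp_rm_try_to_deliver() hands A
	 * back via *reject, and we loop back to "again" with new_irq = A
	 * to retry A's delivery, possibly against a different server.
	 */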

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we need to set the
		 * resend map bit and mark the ICS state as needing a resend.
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry.
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
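	/*
	 * Worked example (lower values are more favored): with
	 * cppr = 0x08, mfrr = 0x04 and pending_pri = 0xff, lowering
	 * CPPR to 0x05 takes the branch below because 0x04 < 0x05 and
	 * 0x04 <= 0xff, so XICS_IPI becomes the pending interrupt at
	 * priority 0x04.
	 */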
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's more favored than us or non-existent.
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}


unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
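	/*
	 * The returned XIRR packs the old CPPR into bits 24-31 and the
	 * XISR into bits 0-23: e.g. cppr 0x05 and xisr 0x1001 yield
	 * xirr 0x05001001. kvmppc_rm_h_eoi() below decodes the same
	 * layout when the guest writes the value back.
	 */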
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where the pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
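	/*
	 * Worked example: with cppr = 0x05, pending_pri = 0x06 and
	 * xisr = 0x1001, an H_IPI with mfrr = 0x03 satisfies both
	 * checks below (0x03 < 0x05 and 0x03 <= 0x06), so irq 0x1001
	 * is rejected for redelivery and XICS_IPI becomes pending at
	 * priority 0x03.
	 */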
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
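	/*
	 * Reminder: smaller CPPR values are more favored, so
	 * cppr > icp->state.cppr means the guest is lowering its
	 * priority (Down_CPPR) and cppr < icp->state.cppr means it
	 * is raising it (Up_CPPR, handled below).
	 */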
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
 bail:
	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR, which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
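	/*
	 * For example, an EOI with xirr = 0x05001001 (the value H_XIRR
	 * returned above) restores CPPR to 0x05 and then EOIs source
	 * 0x1001.
	 */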
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}
 bail:
	return check_too_hard(xics, icp);
}