/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/synch.h>
#include <misc/cxl.h>

#include "cxl.h"
#include "trace.h"

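/*
 * Send a command to the AFU via the AFU_Cntl_An register, then poll, under
 * afu->afu_cntl_lock, until the masked status bits reach the expected
 * result or CXL_TIMEOUT expires.
 */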
static int afu_control(struct cxl_afu *afu, u64 command,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}
	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
int cxl_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU reset request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_RA,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);
}

static int afu_check_and_enable(struct cxl_afu *afu)
{
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

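/*
 * Purge outstanding PSL translations for this slice. The AFU should already
 * be disabled; while the purge is pending we acknowledge (or abort) any
 * faults reported in PSL_DSISR_An so the purge can drain and complete.
 */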
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		       PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx  PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
		if (dsisr & CXL_PSL_DSISR_TRANS) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		       PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
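	 *    sizeof(SPA) = 128(n + 4) + 8n + 256 = 136n + 768
	 *    n = (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17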
	 */
	return ((spa_size / 8) - 96) / 17;
}

static int alloc_spa(struct cxl_afu *afu)
{
	u64 spap;

	/* Work out how many pages to allocate */
	afu->spa_order = 0;
	do {
		afu->spa_order++;
		afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
		afu->spa_max_procs = spa_max_procs(afu->spa_size);
	} while (afu->spa_max_procs < afu->num_procs);

	WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */

	if (!(afu->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
		 1 << afu->spa_order, afu->spa_max_procs, afu->num_procs);

	afu->sw_command_status = (__be64 *)((char *)afu->spa +
					    ((afu->spa_max_procs + 3) * 128));

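	/* Pack the SPA physical address, encoded size and valid bit into SPAP_An */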
	spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);

	return 0;
}

static void release_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
	free_pages((unsigned long) afu->spa, afu->spa_order);
}

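/*
 * Invalidate every TLB and SLB entry on the adapter, polling each request
 * until its pending bit clears or CXL_TIMEOUT expires.
 */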
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}
	return 0;
}

int cxl_afu_slbia(struct cxl_afu *afu)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("cxl_afu_slbia issuing SLBIA command\n");
	cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using the per-slice version (i.e. SLBIA_An) may improve performance here */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
			((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
			be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

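/*
 * Issue a process element command via CXL_PSL_LLCMD_An and wait for the PSL
 * to write the command, expected state and PE handle back through the
 * software command status word in the SPA. Callers serialise SPA updates
 * with afu->spa_mutex.
 */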
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs.  Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service.  Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
				    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0)))
		ctx->pe_inserted = false;
	slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);

	return rc;
}

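/*
 * Master contexts, and AFUs with no per-process problem space, are given the
 * whole problem space; other contexts get their own pp_size slice of it.
 */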
static void assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	if (alloc_spa(afu))
		return -ENOMEM;

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;
	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

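/* Set the endianness bit of an AFU's state register to match the kernel */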
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u64 sr;
	int r, result;

	assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	sr = 0;
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	/*
	 * HV=0, PR=1, R=1 for userspace.
	 * For kernel contexts this would need to change.
	 */
	sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
	set_endian(sr);
	sr &= ~(CXL_PSL_SR_An_HV);
	if (!test_tsk_thread_flag(current, TIF_32BIT))
		sr |= CXL_PSL_SR_An_SF;
	ctx->elem->common.pid = cpu_to_be32(current->pid);
	ctx->elem->common.tid = 0;
	ctx->elem->sr = cpu_to_be64(sr);

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.aurp0 = 0; /* disable */
	ctx->elem->common.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = afu_check_and_enable(ctx->afu)))
		return result;

	/* Don't ignore a failure to add the process element */
	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	release_spa(afu);

	return 0;
}

static int activate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 sr;
	int rc;

	sr = 0;
	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
	if (!test_tsk_thread_flag(current, TIF_32BIT))
		sr |= CXL_PSL_SR_An_SF;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, (u64)current->pid << 32);
	cxl_p1n_write(afu, CXL_PSL_SR_An, sr);

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

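	/* Pack the four IVTE offsets and ranges (16 bits each) into their registers */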
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		       (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		       (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		       (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
			((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		       (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		       (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		       (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
			((u64)ctx->irqs.range[3] & 0xffff));

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	assign_psn_space(ctx);

	if ((rc = cxl_afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

int cxl_afu_deactivate_mode(struct cxl_afu *afu)
{
	return _cxl_afu_deactivate_mode(afu, afu->current_mode);
}

int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(afu);

	return -EINVAL;
}

int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return attach_dedicated(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	cxl_afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

int cxl_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

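/* Capture a snapshot of the per-slice fault and error registers */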
int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	u64 pidtid;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
	info->pid = pidtid >> 32;
	info->tid = pidtid & 0xffffffff;
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);

	return 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

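/* Reading back all 1s typically means the card is no longer accessible (e.g. EEH) */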
int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}