Lines matching refs: ctx (grouped by enclosing function; leading numbers are source line numbers)

In handle_psl_slice_error() [ctx: argument]:
 23  static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
 27  fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
 28  fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
 29  fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
 30  serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
 31  afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
 33  dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
 34  dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
 35  dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
 36  dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
 37  dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
 38  dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
 40  dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
 41  cxl_stop_trace(ctx->afu->adapter);
 43  return cxl_ack_irq(ctx, 0, errstat);
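The slice-error path snapshots the PSL fault-isolation and debug registers, dumps them at dev_crit(), freezes the trace array for post-mortem analysis, and acks the interrupt with the error status. A minimal table-driven sketch of the same dump-and-ack shape; example_ names are hypothetical, not the driver's:

static irqreturn_t example_slice_error(struct cxl_context *ctx, u64 errstat)
{
	/* snapshot first, log second: the values are racy once the AFU restarts */
	const struct { const char *name; u64 val; } regs[] = {
		{ "PSL_FIR1",    cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1) },
		{ "PSL_FIR2",    cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2) },
		{ "PSL_SERR_An", cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An) },
	};
	int i;

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
	for (i = 0; i < ARRAY_SIZE(regs); i++)
		dev_crit(&ctx->afu->dev, "%s: 0x%016llx\n", regs[i].name, regs[i].val);

	cxl_stop_trace(ctx->afu->adapter);	/* freeze trace for post-mortem */
	return cxl_ack_irq(ctx, 0, errstat);	/* ack, reporting the error status */
}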
In schedule_cxl_fault() [ctx: argument]:
 88  static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
 90  ctx->dsisr = dsisr;
 91  ctx->dar = dar;
 92  schedule_work(&ctx->fault_work);
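A classic top-half/bottom-half split: the hard irq handler only stashes DSISR/DAR in the context and schedules ctx->fault_work, since fault resolution may sleep. A sketch of the deferral pairing; the worker name is illustrative (the real bottom half lives elsewhere in the driver):

static void example_fault_worker(struct work_struct *work)
{
	struct cxl_context *ctx = container_of(work, struct cxl_context, fault_work);

	/* safe to sleep here, unlike in the hard irq handler above */
	pr_devel("resolving fault: DSISR %#llx DAR %#llx\n", ctx->dsisr, ctx->dar);
}

/* once, at context initialisation: */
INIT_WORK(&ctx->fault_work, example_fault_worker);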
In cxl_irq() [ctx: local]:
  98  struct cxl_context *ctx = data;
 104  trace_cxl_psl_irq(ctx, irq, dsisr, dar);
 106  pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
 119  pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
 120  return schedule_cxl_fault(ctx, dsisr, dar);
 140  pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
 141  return schedule_cxl_fault(ctx, dsisr, dar);
 148  return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
 152  if (ctx->pending_afu_err) {
 160  dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
 162  ctx->pe, irq_info->afu_err);
 164  spin_lock(&ctx->lock);
 165  ctx->afu_err = irq_info->afu_err;
 166  ctx->pending_afu_err = 1;
 167  spin_unlock(&ctx->lock);
 169  wake_up_all(&ctx->wq);
 172  cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
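cxl_irq() dispatches on the DSISR: segment and page misses are deferred via schedule_cxl_fault(), slice errors go to handle_psl_slice_error(), and AFU errors are latched into the context under ctx->lock before wake_up_all(&ctx->wq). A minimal sketch of the consumer that wake_up_all() pairs with; the wait condition and consumption are illustrative (the real reader checks several pending flags):

static int example_wait_for_afu_err(struct cxl_context *ctx)
{
	if (wait_event_interruptible(ctx->wq, ctx->pending_afu_err))
		return -ERESTARTSYS;	/* interrupted by a signal */

	spin_lock(&ctx->lock);
	pr_devel("AFU error latched: %#llx\n", ctx->afu_err);
	ctx->pending_afu_err = 0;	/* consume the event */
	spin_unlock(&ctx->lock);
	return 0;
}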
In cxl_irq_multiplexed() [ctx: local]:
 195  struct cxl_context *ctx;
 206  ctx = idr_find(&afu->contexts_idr, ph);
 207  if (ctx) {
 208  ret = cxl_irq(irq, ctx, &irq_info);
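The multiplexed PSL interrupt carries a process element handle (ph), which is resolved to a context through the AFU's IDR before forwarding to cxl_irq(). A minimal sketch of the publish side this idr_find() pairs with; the lock name and GFP discipline are assumptions for illustration:

static int example_publish_context(struct cxl_afu *afu, struct cxl_context *ctx)
{
	int pe;

	idr_preload(GFP_KERNEL);
	spin_lock(&afu->contexts_lock);		/* assumed lock name */
	pe = idr_alloc(&afu->contexts_idr, ctx, 0, 0, GFP_NOWAIT);
	spin_unlock(&afu->contexts_lock);
	idr_preload_end();
	if (pe < 0)
		return pe;

	ctx->pe = pe;	/* the handle later seen as 'ph' in the irq path */
	return 0;
}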
In cxl_irq_afu() [ctx: local]:
 223  struct cxl_context *ctx = data;
 230  irq_off = hwirq - ctx->irqs.offset[r];
 231  range = ctx->irqs.range[r];
 240  ctx->pe, irq, hwirq);
 244  trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
 246  afu_irq, ctx->pe, irq, hwirq);
 248  if (unlikely(!ctx->irq_bitmap)) {
 252  spin_lock(&ctx->lock);
 253  set_bit(afu_irq - 1, ctx->irq_bitmap);
 254  ctx->pending_irq = true;
 255  spin_unlock(&ctx->lock);
 257  wake_up_all(&ctx->wq);
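Lines 230-231 invert the mapping set up at allocation time: the hardware irq is scanned against the context's ranges to recover the AFU irq number, which is then recorded in the bitmap (bit afu_irq - 1, since AFU irqs count from 1) before waking ctx->wq. A sketch of that translation, assuming the driver's CXL_IRQ_RANGES bound and that range 0 holds the multiplexed PSL irq (per lines 439-440 below):

static int example_hwirq_to_afu_irq(struct cxl_context *ctx, irq_hw_number_t hwirq)
{
	int r, afu_irq = 1;	/* AFU irqs are numbered from 1 */

	for (r = 1; r < CXL_IRQ_RANGES; r++) {	/* range 0 is the PSL irq */
		if (hwirq >= ctx->irqs.offset[r] &&
		    hwirq < ctx->irqs.offset[r] + ctx->irqs.range[r])
			return afu_irq + hwirq - ctx->irqs.offset[r];
		afu_irq += ctx->irqs.range[r];
	}
	return -1;	/* not one of this context's irqs */
}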
In afu_irq_name_free() [ctx: argument]:
 416  void afu_irq_name_free(struct cxl_context *ctx)
 420  list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
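The body elided at line 420 is the standard unlink-and-free walk; the _safe variant is required because entries are deleted mid-iteration. A sketch consistent with the fields shown in this listing:

void example_irq_name_free(struct cxl_context *ctx)
{
	struct cxl_irq_name *irq_name, *tmp;

	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
		kfree(irq_name->name);	/* kasprintf()-allocated string */
		list_del(&irq_name->list);
		kfree(irq_name);
	}
}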
In afu_allocate_irqs() [ctx: argument]:
 427  int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
 433  INIT_LIST_HEAD(&ctx->irq_names);
 435  if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
 439  ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
 440  ctx->irqs.range[0] = 1;
 442  ctx->irq_count = count;
 443  ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
 444  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
 445  if (!ctx->irq_bitmap)
 453  for (i = 0; i < ctx->irqs.range[r]; i++) {
 459  dev_name(&ctx->afu->dev),
 460  ctx->pe, j);
 466  list_add_tail(&irq_name->list, &ctx->irq_names);
 473  cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 474  afu_irq_name_free(ctx);
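Allocation reserves hwirq ranges from the adapter, pins range 0 to the multiplexed PSL irq, sizes the pending bitmap via BITS_TO_LONGS(count), and builds one name per AFU irq, unwinding through lines 473-474 on failure. A sketch of the per-irq name step as a helper; the format string is a guess consistent with the kasprintf arguments implied by lines 459-460:

static int example_add_irq_name(struct cxl_context *ctx, int j)
{
	struct cxl_irq_name *irq_name;

	irq_name = kmalloc(sizeof(*irq_name), GFP_KERNEL);
	if (!irq_name)
		return -ENOMEM;

	/* hypothetical format; the listing only shows the arguments */
	irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
				   dev_name(&ctx->afu->dev), ctx->pe, j);
	if (!irq_name->name) {
		kfree(irq_name);
		return -ENOMEM;
	}

	list_add_tail(&irq_name->list, &ctx->irq_names);
	return 0;
}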
In afu_register_hwirqs() [ctx: argument]:
 478  static void afu_register_hwirqs(struct cxl_context *ctx)
 485  irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
 487  hwirq = ctx->irqs.offset[r];
 488  for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
 489  cxl_map_irq(ctx->afu->adapter, hwirq,
 490  cxl_irq_afu, ctx, irq_name->name);
In afu_register_irqs() [ctx: argument]:
 496  int afu_register_irqs(struct cxl_context *ctx, u32 count)
 500  rc = afu_allocate_irqs(ctx, count);
 504  afu_register_hwirqs(ctx);
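afu_register_irqs() composes the two halves: allocate ranges and names, then map each hwirq to cxl_irq_afu with ctx as the handler data. A sketch of a typical caller pairing, assuming ctx also serves as the unmap cookie (matching the map-time data at line 490):

static int example_attach(struct cxl_context *ctx, u32 count)
{
	int rc;

	rc = afu_register_irqs(ctx, count);	/* allocate + map in one call */
	if (rc)
		return rc;

	/* ... run the AFU context ... */

	afu_release_irqs(ctx, ctx);	/* cookie must match the map-time data */
	return 0;
}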
In afu_release_irqs() [ctx: argument]:
 508  void afu_release_irqs(struct cxl_context *ctx, void *cookie)
 515  hwirq = ctx->irqs.offset[r];
 516  for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
 523  afu_irq_name_free(ctx);
 524  cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 526  ctx->irq_count = 0;
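Teardown mirrors setup in reverse: walk the ranges unmapping each virq, free the names, return the ranges to the adapter, and clear irq_count. A sketch of the unmap walk elided around lines 515-516, assuming the default irq domain (NULL) resolves hwirq to a Linux virq:

int r, i;
irq_hw_number_t hwirq;
unsigned int virq;

for (r = 0; r < CXL_IRQ_RANGES; r++) {
	hwirq = ctx->irqs.offset[r];
	for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
		virq = irq_find_mapping(NULL, hwirq);	/* default domain assumed */
		if (virq)
			cxl_unmap_irq(virq, cookie);
	}
}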