This source file includes the following definitions:
- EADM_LOG_HEX
- orb_init
- eadm_subchannel_start
- eadm_subchannel_clear
- eadm_subchannel_timeout
- eadm_subchannel_set_timeout
- eadm_subchannel_irq
- eadm_get_idle_sch
- eadm_start_aob
- eadm_subchannel_probe
- eadm_quiesce
- eadm_subchannel_remove
- eadm_subchannel_shutdown
- eadm_subchannel_freeze
- eadm_subchannel_restore
- eadm_subchannel_sch_event
- eadm_sch_init
- eadm_sch_exit
#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include <asm/scsw.h>
#include <asm/eadm.h>

#include "eadm_sch.h"
#include "ioasm.h"
#include "cio.h"
#include "css.h"
#include "orb.h"

MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");

#define EADM_TIMEOUT (7 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);

static debug_info_t *eadm_debug;

#define EADM_LOG(imp, txt) do {                                 \
                debug_text_event(eadm_debug, imp, txt);         \
        } while (0)

static void EADM_LOG_HEX(int level, void *data, int length)
{
        debug_event(eadm_debug, level, data, length);
}

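/* Clear the ORB and set the format and control bits required for eadm requests. */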
static void orb_init(union orb *orb)
{
        memset(orb, 0, sizeof(union orb));
        orb->eadm.compat1 = 1;
        orb->eadm.compat2 = 1;
        orb->eadm.fmt = 1;
        orb->eadm.x = 1;
}

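/*
 * Start an eadm request: set up the ORB with the AOB address and the
 * subchannel as interruption parameter, then issue ssch.  The condition
 * code is translated into -EBUSY/-ENODEV; on cc 0 the start-pending bit
 * is mirrored in the local scsw copy.
 */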
static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
        union orb *orb = &get_eadm_private(sch)->orb;
        int cc;

        orb_init(orb);
        orb->eadm.aob = (u32)__pa(aob);
        orb->eadm.intparm = (u32)(addr_t)sch;
        orb->eadm.key = PAGE_DEFAULT_KEY >> 4;

        EADM_LOG(6, "start");
        EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));

        cc = ssch(sch->schid, orb);
        switch (cc) {
        case 0:
                sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
                break;
        case 1:
        case 2:
                return -EBUSY;
        case 3:
                return -ENODEV;
        }
        return 0;
}

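/* Issue clear subchannel to terminate the currently running eadm request. */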
static int eadm_subchannel_clear(struct subchannel *sch)
{
        int cc;

        cc = csch(sch->schid);
        if (cc)
                return -ENODEV;

        sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
        return 0;
}

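/*
 * Timer callback: a request did not complete within EADM_TIMEOUT.
 * Try to terminate it with a clear subchannel; completion (and the
 * BLK_STS_TIMEOUT error) is then reported via the interrupt handler.
 */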
static void eadm_subchannel_timeout(struct timer_list *t)
{
        struct eadm_private *private = from_timer(private, t, timer);
        struct subchannel *sch = private->sch;

        spin_lock_irq(sch->lock);
        EADM_LOG(1, "timeout");
        EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
        if (eadm_subchannel_clear(sch))
                EADM_LOG(0, "clear failed");
        spin_unlock_irq(sch->lock);
}

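/*
 * Arm, re-arm or (for expires == 0) cancel the per-subchannel timeout
 * timer.  Callers hold the subchannel lock.
 */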
static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
        struct eadm_private *private = get_eadm_private(sch);

        if (expires == 0) {
                del_timer(&private->timer);
                return;
        }
        if (timer_pending(&private->timer)) {
                if (mod_timer(&private->timer, jiffies + expires))
                        return;
        }
        private->timer.expires = jiffies + expires;
        add_timer(&private->timer);
}

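/*
 * Interrupt handler: map the subchannel status into a blk_status_t
 * (I/O error, or timeout after a clear function), stop the timeout timer
 * and hand the finished AOB to the scm layer.  Unsolicited interrupts
 * mark the subchannel as not operational and trigger re-evaluation.
 * A waiter in eadm_quiesce() is woken via the completion, if set.
 */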
static void eadm_subchannel_irq(struct subchannel *sch)
{
        struct eadm_private *private = get_eadm_private(sch);
        struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
        struct irb *irb = this_cpu_ptr(&cio_irb);
        blk_status_t error = BLK_STS_OK;

        EADM_LOG(6, "irq");
        EADM_LOG_HEX(6, irb, sizeof(*irb));

        inc_irq_stat(IRQIO_ADM);

        if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
            && scsw->eswf == 1 && irb->esw.eadm.erw.r)
                error = BLK_STS_IOERR;

        if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
                error = BLK_STS_TIMEOUT;

        eadm_subchannel_set_timeout(sch, 0);

        if (private->state != EADM_BUSY) {
                EADM_LOG(1, "irq unsol");
                EADM_LOG_HEX(1, irb, sizeof(*irb));
                private->state = EADM_NOT_OPER;
                css_sched_sch_todo(sch, SCH_TODO_EVAL);
                return;
        }
        scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
        private->state = EADM_IDLE;

        if (private->completion)
                complete(private->completion);
}

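/*
 * Pick an idle eadm subchannel, mark it busy and move it to the end of
 * eadm_list so requests are spread across all subchannels.  Returns
 * NULL if no subchannel is currently idle.
 */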
static struct subchannel *eadm_get_idle_sch(void)
{
        struct eadm_private *private;
        struct subchannel *sch;
        unsigned long flags;

        spin_lock_irqsave(&list_lock, flags);
        list_for_each_entry(private, &eadm_list, head) {
                sch = private->sch;
                spin_lock(sch->lock);
                if (private->state == EADM_IDLE) {
                        private->state = EADM_BUSY;
                        list_move_tail(&private->head, &eadm_list);
                        spin_unlock(sch->lock);
                        spin_unlock_irqrestore(&list_lock, flags);

                        return sch;
                }
                spin_unlock(sch->lock);
        }
        spin_unlock_irqrestore(&list_lock, flags);

        return NULL;
}

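/*
 * Start processing of an AOB on an idle eadm subchannel.  Exported for
 * use by the scm block layer; completion is signalled asynchronously
 * through scm_irq_handler().  Returns -EBUSY if no idle subchannel is
 * available, otherwise the result of starting the request (0 on success).
 */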
int eadm_start_aob(struct aob *aob)
{
        struct eadm_private *private;
        struct subchannel *sch;
        unsigned long flags;
        int ret;

        sch = eadm_get_idle_sch();
        if (!sch)
                return -EBUSY;

        spin_lock_irqsave(sch->lock, flags);
        eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
        ret = eadm_subchannel_start(sch, aob);
        if (!ret)
                goto out_unlock;

        eadm_subchannel_set_timeout(sch, 0);
        private = get_eadm_private(sch);
        private->state = EADM_NOT_OPER;
        css_sched_sch_todo(sch, SCH_TODO_EVAL);

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(eadm_start_aob);

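/*
 * Probe callback: allocate the per-subchannel private data, enable the
 * subchannel for the eadm ISC and add it to eadm_list.  Uevent
 * suppression (if set by the common I/O layer) is lifted once the
 * subchannel is ready.
 */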
static int eadm_subchannel_probe(struct subchannel *sch)
{
        struct eadm_private *private;
        int ret;

        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;

        INIT_LIST_HEAD(&private->head);
        timer_setup(&private->timer, eadm_subchannel_timeout, 0);

        spin_lock_irq(sch->lock);
        set_eadm_private(sch, private);
        private->state = EADM_IDLE;
        private->sch = sch;
        sch->isc = EADM_SCH_ISC;
        ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
        if (ret) {
                set_eadm_private(sch, NULL);
                spin_unlock_irq(sch->lock);
                kfree(private);
                goto out;
        }
        spin_unlock_irq(sch->lock);

        spin_lock_irq(&list_lock);
        list_add(&private->head, &eadm_list);
        spin_unlock_irq(&list_lock);

        if (dev_get_uevent_suppress(&sch->dev)) {
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
out:
        return ret;
}

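/*
 * Quiesce the subchannel: if a request is in flight, clear it and wait
 * for the interrupt handler to signal completion, then disable the
 * subchannel (retrying while it reports -EBUSY).
 */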
static void eadm_quiesce(struct subchannel *sch)
{
        struct eadm_private *private = get_eadm_private(sch);
        DECLARE_COMPLETION_ONSTACK(completion);
        int ret;

        spin_lock_irq(sch->lock);
        if (private->state != EADM_BUSY)
                goto disable;

        if (eadm_subchannel_clear(sch))
                goto disable;

        private->completion = &completion;
        spin_unlock_irq(sch->lock);

        wait_for_completion_io(&completion);

        spin_lock_irq(sch->lock);
        private->completion = NULL;

disable:
        eadm_subchannel_set_timeout(sch, 0);
        do {
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);

        spin_unlock_irq(sch->lock);
}

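/*
 * Remove callback: take the subchannel off eadm_list, quiesce it and
 * free the private data.
 */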
static int eadm_subchannel_remove(struct subchannel *sch)
{
        struct eadm_private *private = get_eadm_private(sch);

        spin_lock_irq(&list_lock);
        list_del(&private->head);
        spin_unlock_irq(&list_lock);

        eadm_quiesce(sch);

        spin_lock_irq(sch->lock);
        set_eadm_private(sch, NULL);
        spin_unlock_irq(sch->lock);

        kfree(private);

        return 0;
}

static void eadm_subchannel_shutdown(struct subchannel *sch)
{
        eadm_quiesce(sch);
}

static int eadm_subchannel_freeze(struct subchannel *sch)
{
        return cio_disable_subchannel(sch);
}

static int eadm_subchannel_restore(struct subchannel *sch)
{
        return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}

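/*
 * eadm_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if called in process context (unused here)
 *
 * An unspecified event occurred for this subchannel.  Re-read the schib;
 * if the subchannel is gone, schedule its unregistration, otherwise a
 * not-operational subchannel is set back to idle.  Returns zero.
 */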
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
        struct eadm_private *private;
        unsigned long flags;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        if (cio_update_schib(sch)) {
                css_sched_sch_todo(sch, SCH_TODO_UNREG);
                goto out_unlock;
        }
        private = get_eadm_private(sch);
        if (private->state == EADM_NOT_OPER)
                private->state = EADM_IDLE;

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return 0;
}

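/* This driver matches ADM-type subchannels only. */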
static struct css_device_id eadm_subchannel_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
        { },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);

static struct css_driver eadm_subchannel_driver = {
        .drv = {
                .name = "eadm_subchannel",
                .owner = THIS_MODULE,
        },
        .subchannel_type = eadm_subchannel_ids,
        .irq = eadm_subchannel_irq,
        .probe = eadm_subchannel_probe,
        .remove = eadm_subchannel_remove,
        .shutdown = eadm_subchannel_shutdown,
        .sch_event = eadm_subchannel_sch_event,
        .freeze = eadm_subchannel_freeze,
        .thaw = eadm_subchannel_restore,
        .restore = eadm_subchannel_restore,
};

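/*
 * Module init: bail out if the machine does not provide the eadm
 * facility, set up the s390 debug feature, register the eadm ISC and
 * finally the css driver.
 */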
static int __init eadm_sch_init(void)
{
        int ret;

        if (!css_general_characteristics.eadm)
                return -ENXIO;

        eadm_debug = debug_register("eadm_log", 16, 1, 16);
        if (!eadm_debug)
                return -ENOMEM;

        debug_register_view(eadm_debug, &debug_hex_ascii_view);
        debug_set_level(eadm_debug, 2);

        isc_register(EADM_SCH_ISC);
        ret = css_driver_register(&eadm_subchannel_driver);
        if (ret)
                goto cleanup;

        return ret;

cleanup:
        isc_unregister(EADM_SCH_ISC);
        debug_unregister(eadm_debug);
        return ret;
}

static void __exit eadm_sch_exit(void)
{
        css_driver_unregister(&eadm_subchannel_driver);
        isc_unregister(EADM_SCH_ISC);
        debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);