This source file includes following definitions.
- genwqe_mapping_init
- genwqe_get_slu_id
- dma_mapping_used
- genwqe_is_privileged
1
2 #ifndef __CARD_BASE_H__
3 #define __CARD_BASE_H__
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/cdev.h>
24 #include <linux/stringify.h>
25 #include <linux/pci.h>
26 #include <linux/semaphore.h>
27 #include <linux/uaccess.h>
28 #include <linux/io.h>
29 #include <linux/debugfs.h>
30 #include <linux/slab.h>
31
32 #include <linux/genwqe/genwqe_card.h>
33 #include "genwqe_driver.h"
34
35 #define GENWQE_MSI_IRQS 4
36
37 #define GENWQE_MAX_VFS 15
38 #define GENWQE_MAX_FUNCS 16
39 #define GENWQE_CARD_NO_MAX (16 * GENWQE_MAX_FUNCS)
40
41
42 #define GENWQE_DDCB_MAX 32
43 #define GENWQE_POLLING_ENABLED 0
44 #define GENWQE_DDCB_SOFTWARE_TIMEOUT 10
45 #define GENWQE_KILL_TIMEOUT 8
46 #define GENWQE_VF_JOBTIMEOUT_MSEC 250
47 #define GENWQE_PF_JOBTIMEOUT_MSEC 8000
48 #define GENWQE_HEALTH_CHECK_INTERVAL 4
49
50
51 extern const struct attribute_group *genwqe_attribute_groups[];
52
53
54
55
56
57
58
59
60 #define PCI_DEVICE_GENWQE 0x044b
61
62 #define PCI_SUBSYSTEM_ID_GENWQE5 0x035f
63 #define PCI_SUBSYSTEM_ID_GENWQE5_NEW 0x044b
64 #define PCI_CLASSCODE_GENWQE5 0x1200
65
66 #define PCI_SUBVENDOR_ID_IBM_SRIOV 0x0000
67 #define PCI_SUBSYSTEM_ID_GENWQE5_SRIOV 0x0000
68 #define PCI_CLASSCODE_GENWQE5_SRIOV 0x1200
69
70 #define GENWQE_SLU_ARCH_REQ 2
71
72
73
74
/**
 * struct genwqe_reg - one entry of a hardware register dump
 * @addr: register offset/address the value was read from
 * @idx:  index within the dump (e.g. array position) — presumably used to
 *        distinguish repeated reads of the same register; confirm in dump code
 * @val:  64-bit value captured for this register
 *
 * Used by the FFDC (first failure data capture) helpers below, which fill
 * arrays of these entries (see genwqe_read_ffdc_regs()/genwqe_ffdc_buff_read()).
 */
struct genwqe_reg {
	u32 addr;
	u32 idx;
	u64 val;
};
80
81
82
83
/**
 * enum genwqe_dbg_type - debug dump regions of the card
 *
 * UNIT0..UNIT7 address the eight per-unit FFDC regions, REGS the plain
 * register dump and DMA the DMA-related dump.  GENWQE_DBG_UNITS is the
 * total number of regions and sizes the ffdc[] array in struct genwqe_dev.
 */
enum genwqe_dbg_type {
	GENWQE_DBG_UNIT0 = 0,
	GENWQE_DBG_UNIT1 = 1,
	GENWQE_DBG_UNIT2 = 2,
	GENWQE_DBG_UNIT3 = 3,
	GENWQE_DBG_UNIT4 = 4,
	GENWQE_DBG_UNIT5 = 5,
	GENWQE_DBG_UNIT6 = 6,
	GENWQE_DBG_UNIT7 = 7,
	GENWQE_DBG_REGS = 8,
	GENWQE_DBG_DMA = 9,
	GENWQE_DBG_UNITS = 10, /* number of dump regions, keep last */
};
97
98
99 #define GENWQE_INJECT_HARDWARE_FAILURE 0x00000001
100 #define GENWQE_INJECT_BUS_RESET_FAILURE 0x00000002
101 #define GENWQE_INJECT_GFIR_FATAL 0x00000004
102 #define GENWQE_INJECT_GFIR_INFO 0x00000008
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
/**
 * enum dma_mapping_type - how a user buffer was mapped for DMA
 * @GENWQE_MAPPING_RAW:        raw kernel buffer mapping
 * @GENWQE_MAPPING_SGL_TEMP:   scatter/gather list, temporary per request
 * @GENWQE_MAPPING_SGL_PINNED: scatter/gather list, pinned across requests
 *                             (tracked on a genwqe_file's pin_list)
 */
enum dma_mapping_type {
	GENWQE_MAPPING_RAW = 0,
	GENWQE_MAPPING_SGL_TEMP,
	GENWQE_MAPPING_SGL_PINNED,
};
159
160
161
162
/**
 * struct dma_mapping - bookkeeping for a DMA-mapped user buffer
 * @type:      see enum dma_mapping_type
 * @u_vaddr:   user-space virtual address of the buffer
 * @k_vaddr:   kernel virtual address, when a kernel mapping exists
 * @dma_addr:  DMA address for single/contiguous mappings
 * @page_list: pinned user pages backing the buffer
 * @dma_list:  per-page DMA addresses matching @page_list
 * @nr_pages:  number of entries in @page_list/@dma_list
 * @size:      mapping size in bytes; non-zero marks the mapping "in use"
 *             (see dma_mapping_used())
 * @card_list: linkage on a per-file list (genwqe_file.map_list)
 * @pin_list:  linkage on a per-file pin list (genwqe_file.pin_list)
 * @write:     non-zero if the device may write the buffer; defaults to 1
 *             in genwqe_mapping_init()
 */
struct dma_mapping {
	enum dma_mapping_type type;

	void *u_vaddr;
	void *k_vaddr;
	dma_addr_t dma_addr;

	struct page **page_list;
	dma_addr_t *dma_list;
	unsigned int nr_pages;
	unsigned int size;

	struct list_head card_list;
	struct list_head pin_list;
	int write;
};
179
180 static inline void genwqe_mapping_init(struct dma_mapping *m,
181 enum dma_mapping_type type)
182 {
183 memset(m, 0, sizeof(*m));
184 m->type = type;
185 m->write = 1;
186 }
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
/**
 * struct ddcb_queue - DDCB (device driver control block) queue state
 *
 * Holds the DMA-able DDCB array, per-slot request/waitqueue bookkeeping,
 * statistics and the hardware register offsets used to drive the queue.
 * Field roles below are inferred from names/types — confirm in card_ddcb.c.
 */
struct ddcb_queue {
	int ddcb_max;			/* number of DDCB slots in the queue */
	int ddcb_next;			/* next slot to enqueue into */
	int ddcb_act;			/* next slot expected to complete */
	u16 ddcb_seq;			/* sequence number of last DDCB */
	unsigned int ddcbs_in_flight;	/* currently enqueued DDCBs */
	unsigned int ddcbs_completed;	/* stats: total completed */
	unsigned int ddcbs_max_in_flight; /* stats: in-flight high watermark */
	unsigned int return_on_busy;	/* stats: returned when queue full */
	unsigned int wait_on_busy;	/* stats: waited when queue full */

	dma_addr_t ddcb_daddr;		/* DMA address of the DDCB array */
	struct ddcb *ddcb_vaddr;	/* kernel virtual address of DDCBs */
	struct ddcb_requ **ddcb_req;	/* one request pointer per slot */
	wait_queue_head_t *ddcb_waitqs;	/* one waitqueue per slot */

	spinlock_t ddcb_lock;		/* protects queue state */
	wait_queue_head_t busy_waitq;	/* waiters for a free slot */

	/* registers of the respective queue to be used */
	u32 IO_QUEUE_CONFIG;
	u32 IO_QUEUE_STATUS;
	u32 IO_QUEUE_SEGMENT;
	u32 IO_QUEUE_INITSQN;
	u32 IO_QUEUE_WRAP;
	u32 IO_QUEUE_OFFSET;
	u32 IO_QUEUE_WTIME;
	u32 IO_QUEUE_ERRCNTS;
	u32 IO_QUEUE_LRW;
};
236
237
238
239
240
241 #define GENWQE_FFDC_REGS (3 + (8 * (2 + 2 * 64)))
242
/**
 * struct genwqe_ffdc - First Failure Data Capture buffer for one region
 * @entries: number of valid entries in @regs
 * @regs:    captured register values (allocated at GENWQE_FFDC_REGS size
 *           or per genwqe_ffdc_buff_size(), presumably — verify in setup)
 */
struct genwqe_ffdc {
	unsigned int entries;
	struct genwqe_reg *regs;
};
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
/**
 * struct genwqe_dev - per-card device state
 *
 * Central descriptor for one GenWQE card: card identification, the DDCB
 * queue plus its service threads, health monitoring, the char device and
 * debugfs handles, PCI resources and a few cached unit-config registers.
 */
struct genwqe_dev {
	enum genwqe_card_state card_state;
	spinlock_t print_lock;		/* serializes diagnostic output */

	int card_idx;			/* card index, < GENWQE_CARD_NO_MAX */
	u64 flags;			/* GENWQE_INJECT_* error injection bits */

	/* FFDC data gathering, one buffer per debug region */
	struct genwqe_ffdc ffdc[GENWQE_DBG_UNITS];

	/* DDCB queue */
	struct task_struct *card_thread; /* queue worker thread */
	wait_queue_head_t queue_waitq;
	struct ddcb_queue queue;	/* genwqe DDCB queue */
	unsigned int irqs_processed;	/* stats: interrupts handled */

	/* health monitoring thread */
	struct task_struct *health_thread;
	wait_queue_head_t health_waitq;

	int use_platform_recovery;	/* use platform recovery mechanisms */

	/* char device handling */
	dev_t devnum_genwqe;		/* major/minor num card */
	struct class *class_genwqe;	/* reference to sysfs class */
	struct device *dev;		/* for device creation */
	struct cdev cdev_genwqe;	/* char device for card */

	struct dentry *debugfs_root;	/* debugfs card root directory */
	struct dentry *debugfs_genwqe;	/* debugfs driver root directory */

	/* pci resources */
	struct pci_dev *pci_dev;	/* PCI device */
	void __iomem *mmio;		/* BAR-0 MMIO start */
	unsigned long mmio_len;
	int num_vfs;
	u32 vf_jobtimeout_msec[GENWQE_MAX_VFS];
	int is_privileged;		/* non-zero for PF, 0 for VF —
					 * see genwqe_is_privileged() */

	/* cached registers / card capabilities */
	u64 slu_unitcfg;		/* SLU unit config; SLU ID in [39:32] */
	u64 app_unitcfg;
	u64 softreset;
	u64 err_inject;
	u64 last_gfir;			/* last Global FIR value seen */
	char app_name[5];		/* 4-char app id + NUL terminator */

	spinlock_t file_lock;		/* protects @file_list */
	struct list_head file_list;	/* open genwqe_file instances */

	/* debugfs parameters */
	int ddcb_software_timeout;	/* seconds, cf. GENWQE_DDCB_SOFTWARE_TIMEOUT */
	int skip_recovery;		/* do not recover from fatal errors */
	int kill_timeout;		/* seconds, cf. GENWQE_KILL_TIMEOUT */
};
323
324
325
326
/**
 * enum genwqe_requ_state - life-cycle states of a DDCB request
 * @GENWQE_REQU_NEW:       request created, not yet enqueued
 * @GENWQE_REQU_ENQUEUED:  request placed on the DDCB queue
 * @GENWQE_REQU_TAPPED:    hardware has been notified/tapped (presumably —
 *                         verify against the enqueue path)
 * @GENWQE_REQU_FINISHED:  request completed
 * @GENWQE_REQU_STATE_MAX: number of states, keep last
 */
enum genwqe_requ_state {
	GENWQE_REQU_NEW = 0,
	GENWQE_REQU_ENQUEUED = 1,
	GENWQE_REQU_TAPPED = 2,
	GENWQE_REQU_FINISHED = 3,
	GENWQE_REQU_STATE_MAX,
};
334
335
336
337
338
339
340
341
342
343
344
345
/**
 * struct genwqe_sgl - state for a hardware scatter/gather list
 * @sgl_dma_addr:   DMA address of the sg_entry array
 * @sgl:            the scatter/gather list itself
 * @sgl_size:       allocated size of @sgl in bytes
 * @user_addr:      user-space address the list describes
 * @user_size:      length of the user buffer in bytes
 * @write:          non-zero if the device may write the buffer
 * @nr_pages:       number of pages covered
 * @fpage_offs:     offset of @user_addr within the first page
 * @fpage_size:     number of user bytes in the first (partial) page
 * @lpage_size:     number of user bytes in the last (partial) page
 * @fpage:          bounce buffer for the partial first page
 * @fpage_dma_addr: its DMA address
 * @lpage:          bounce buffer for the partial last page
 * @lpage_dma_addr: its DMA address
 *
 * Filled in by genwqe_alloc_sync_sgl()/genwqe_setup_sgl() and released
 * via genwqe_free_sync_sgl().
 */
struct genwqe_sgl {
	dma_addr_t sgl_dma_addr;
	struct sg_entry *sgl;
	size_t sgl_size;	/* size of sgl list + padding */

	void __user *user_addr;	/* user-space base-address */
	size_t user_size;	/* size of user-space memory area */

	int write;		/* writing, because of copy from device */

	unsigned long nr_pages;
	unsigned long fpage_offs;
	size_t fpage_size;
	size_t lpage_size;

	void *fpage;
	dma_addr_t fpage_dma_addr;

	void *lpage;
	dma_addr_t lpage_dma_addr;
};
367
368 int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
369 void __user *user_addr, size_t user_size, int write);
370
371 int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
372 dma_addr_t *dma_list);
373
374 int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
375
376
377
378
379
/**
 * struct ddcb_requ - kernel-internal state of one DDCB request
 * @req_state:    current life-cycle state, see enum genwqe_requ_state
 * @num:          DDCB slot number assigned to this request
 * @queue:        queue this request was enqueued on
 * @dma_mappings: per-request buffer mappings, one per possible fixup
 * @sgls:         per-request scatter/gather lists, one per possible fixup
 * @cmd:          user DDCB command as copied from user space
 * @debug_data:   debug information attached to the request
 *
 * Note: @cmd and @debug_data are shared with user space; keep them last
 * so the kernel-only fields above are not exposed — TODO confirm against
 * the copy_to/from_user usage in card_dev.c.
 */
struct ddcb_requ {
	/* kernel specific content */
	enum genwqe_requ_state req_state; /* request status */
	int num;			  /* ddcb_no for this request */
	struct ddcb_queue *queue;	  /* associated queue */

	struct dma_mapping dma_mappings[DDCB_FIXUPS];
	struct genwqe_sgl sgls[DDCB_FIXUPS];

	/* kernel/user shared content */
	struct genwqe_ddcb_cmd cmd;	/* ddcb_no for this request */
	struct genwqe_debug_data debug_data;
};
393
394
395
396
/**
 * struct genwqe_file - per-open-file state of the char device
 * @cd:          card this file handle belongs to
 * @client:      driver client data (see genwqe_driver.h)
 * @filp:        back pointer to the VFS file
 * @async_queue: fasync notification queue
 * @opener:      pid of the process that opened the device
 * @list:        linkage on genwqe_dev.file_list
 * @map_lock:    protects @map_list
 * @map_list:    dma_mapping entries owned by this file
 * @pin_lock:    protects @pin_list
 * @pin_list:    pinned dma_mapping entries owned by this file
 */
struct genwqe_file {
	struct genwqe_dev *cd;
	struct genwqe_driver *client;
	struct file *filp;

	struct fasync_struct *async_queue;
	struct pid *opener;
	struct list_head list;		/* entry in list of open files */

	spinlock_t map_lock;		/* lock for dma_mappings */
	struct list_head map_list;	/* list of dma_mappings */

	spinlock_t pin_lock;		/* lock for pinned memory */
	struct list_head pin_list;	/* list of pinned memory */
};
412
413 int genwqe_setup_service_layer(struct genwqe_dev *cd);
414 int genwqe_finish_queue(struct genwqe_dev *cd);
415 int genwqe_release_service_layer(struct genwqe_dev *cd);
416
417
418
419
420
421
422
423
424 static inline int genwqe_get_slu_id(struct genwqe_dev *cd)
425 {
426 return (int)((cd->slu_unitcfg >> 32) & 0xff);
427 }
428
429 int genwqe_ddcbs_in_flight(struct genwqe_dev *cd);
430
431 u8 genwqe_card_type(struct genwqe_dev *cd);
432 int genwqe_card_reset(struct genwqe_dev *cd);
433 int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count);
434 void genwqe_reset_interrupt_capability(struct genwqe_dev *cd);
435
436 int genwqe_device_create(struct genwqe_dev *cd);
437 int genwqe_device_remove(struct genwqe_dev *cd);
438
439
440 void genwqe_init_debugfs(struct genwqe_dev *cd);
441 void genqwe_exit_debugfs(struct genwqe_dev *cd);
442
443 int genwqe_read_softreset(struct genwqe_dev *cd);
444
445
446 int genwqe_recovery_on_fatal_gfir_required(struct genwqe_dev *cd);
447 int genwqe_flash_readback_fails(struct genwqe_dev *cd);
448
449
450
451
452
453
454
455
456 int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func);
457
458
459
460
461
462
463
464
465
466 u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func);
467
468
469 int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int unit_id);
470 int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int unit_id,
471 struct genwqe_reg *regs, unsigned int max_regs);
472 int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
473 unsigned int max_regs, int all);
474 int genwqe_ffdc_dump_dma(struct genwqe_dev *cd,
475 struct genwqe_reg *regs, unsigned int max_regs);
476
477 int genwqe_init_debug_data(struct genwqe_dev *cd,
478 struct genwqe_debug_data *d);
479
480 void genwqe_init_crc32(void);
481 int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len);
482
483
484 int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
485 void *uaddr, unsigned long size);
486
487 int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m);
488
489 static inline bool dma_mapping_used(struct dma_mapping *m)
490 {
491 if (!m)
492 return false;
493 return m->size != 0;
494 }
495
496
497
498
499
500
501
502
503
504
505 int __genwqe_execute_ddcb(struct genwqe_dev *cd,
506 struct genwqe_ddcb_cmd *cmd, unsigned int f_flags);
507
508
509
510
511
512
513
514
515
516
517 int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
518 struct genwqe_ddcb_cmd *cmd,
519 unsigned int f_flags);
520 int __genwqe_enqueue_ddcb(struct genwqe_dev *cd,
521 struct ddcb_requ *req,
522 unsigned int f_flags);
523
524 int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
525 int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req);
526
527
528 int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val);
529 u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs);
530 int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val);
531 u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs);
532
533 void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
534 dma_addr_t *dma_handle);
535 void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
536 void *vaddr, dma_addr_t dma_handle);
537
538
539 int genwqe_base_clock_frequency(struct genwqe_dev *cd);
540
541
542 void genwqe_stop_traps(struct genwqe_dev *cd);
543 void genwqe_start_traps(struct genwqe_dev *cd);
544
545
546 bool genwqe_need_err_masking(struct genwqe_dev *cd);
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
/**
 * genwqe_is_privileged() - Check if we are operating in privileged mode
 * @cd: genwqe device descriptor
 *
 * Return: non-zero when this instance runs privileged (presumably the
 *         physical function rather than a VF — set at probe time; confirm
 *         in the PCI probe code), 0 otherwise.
 */
static inline int genwqe_is_privileged(struct genwqe_dev *cd)
{
	return cd->is_privileged;
}
576
577 #endif