This source file includes the following definitions:
- idt_nt_write
- idt_nt_read
- idt_sw_write
- idt_sw_read
- idt_reg_set_bits
- idt_reg_clear_bits
- idt_scan_ports
- idt_ntb_port_number
- idt_ntb_peer_port_count
- idt_ntb_peer_port_number
- idt_ntb_peer_port_idx
- idt_init_link
- idt_deinit_link
- idt_se_isr
- idt_ntb_local_link_enable
- idt_ntb_local_link_disable
- idt_ntb_local_link_is_up
- idt_ntb_peer_link_is_up
- idt_ntb_link_is_up
- idt_ntb_link_enable
- idt_ntb_link_disable
- idt_get_mw_count
- idt_get_mw_name
- idt_scan_mws
- idt_init_mws
- idt_ntb_mw_count
- idt_ntb_mw_get_align
- idt_ntb_peer_mw_count
- idt_ntb_peer_mw_get_addr
- idt_ntb_peer_mw_set_trans
- idt_ntb_peer_mw_clear_trans
- idt_db_isr
- idt_ntb_db_valid_mask
- idt_ntb_db_read
- idt_ntb_db_clear
- idt_ntb_db_read_mask
- idt_ntb_db_set_mask
- idt_ntb_db_clear_mask
- idt_ntb_peer_db_set
- idt_init_msg
- idt_msg_isr
- idt_ntb_msg_count
- idt_ntb_msg_inbits
- idt_ntb_msg_outbits
- idt_ntb_msg_read_sts
- idt_ntb_msg_clear_sts
- idt_ntb_msg_set_mask
- idt_ntb_msg_clear_mask
- idt_ntb_msg_read
- idt_ntb_peer_msg_write
- idt_get_deg
- idt_get_deg_frac
- idt_temp_get_fmt
- idt_get_temp_sval
- idt_get_temp_uval
- idt_read_temp
- idt_write_temp
- idt_sysfs_show_temp
- idt_sysfs_set_temp
- idt_sysfs_reset_hist
- idt_init_temp
- idt_init_isr
- idt_deinit_isr
- idt_thread_isr
- idt_register_device
- idt_unregister_device
- idt_dbgfs_info_read
- idt_init_dbgfs
- idt_deinit_dbgfs
- idt_check_setup
- idt_create_dev
- idt_init_pci
- idt_deinit_pci
- idt_pci_probe
- idt_pci_remove
- idt_pci_driver_init
- idt_pci_driver_exit
42 #include <linux/stddef.h>
43 #include <linux/types.h>
44 #include <linux/kernel.h>
45 #include <linux/bitops.h>
46 #include <linux/sizes.h>
47 #include <linux/module.h>
48 #include <linux/moduleparam.h>
49 #include <linux/init.h>
50 #include <linux/interrupt.h>
51 #include <linux/spinlock.h>
52 #include <linux/mutex.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/slab.h>
56 #include <linux/list.h>
57 #include <linux/debugfs.h>
58 #include <linux/hwmon.h>
59 #include <linux/hwmon-sysfs.h>
60 #include <linux/ntb.h>
61
62 #include "ntb_hw_idt.h"
63
64 #define NTB_NAME "ntb_hw_idt"
65 #define NTB_DESC "IDT PCI-E Non-Transparent Bridge Driver"
66 #define NTB_VER "2.0"
67 #define NTB_IRQNAME "ntb_irq_idt"
68
69 MODULE_DESCRIPTION(NTB_DESC);
70 MODULE_VERSION(NTB_VER);
71 MODULE_LICENSE("GPL v2");
72 MODULE_AUTHOR("T-platforms");
73
/*
 * Registers of the local NT function: the six BAR setup/limit/translation-base
 * register sets and the four inbound/outbound message registers with their
 * source registers.
 */
78 static const struct idt_ntb_regs ntdata_tbl = {
79 { {IDT_NT_BARSETUP0, IDT_NT_BARLIMIT0,
80 IDT_NT_BARLTBASE0, IDT_NT_BARUTBASE0},
81 {IDT_NT_BARSETUP1, IDT_NT_BARLIMIT1,
82 IDT_NT_BARLTBASE1, IDT_NT_BARUTBASE1},
83 {IDT_NT_BARSETUP2, IDT_NT_BARLIMIT2,
84 IDT_NT_BARLTBASE2, IDT_NT_BARUTBASE2},
85 {IDT_NT_BARSETUP3, IDT_NT_BARLIMIT3,
86 IDT_NT_BARLTBASE3, IDT_NT_BARUTBASE3},
87 {IDT_NT_BARSETUP4, IDT_NT_BARLIMIT4,
88 IDT_NT_BARLTBASE4, IDT_NT_BARUTBASE4},
89 {IDT_NT_BARSETUP5, IDT_NT_BARLIMIT5,
90 IDT_NT_BARLTBASE5, IDT_NT_BARUTBASE5} },
91 { {IDT_NT_INMSG0, IDT_NT_OUTMSG0, IDT_NT_INMSGSRC0},
92 {IDT_NT_INMSG1, IDT_NT_OUTMSG1, IDT_NT_INMSGSRC1},
93 {IDT_NT_INMSG2, IDT_NT_OUTMSG2, IDT_NT_INMSGSRC2},
94 {IDT_NT_INMSG3, IDT_NT_OUTMSG3, IDT_NT_INMSGSRC3} }
95 };

/*
 * Per-port registers of the switch: PCIe command/status and link
 * control/status, NT control, switch port control/status, and the BAR
 * setup/limit/translation-base registers of each NT endpoint. Entries of
 * ports that cannot carry an NT function are left zeroed.
 */
101 static const struct idt_ntb_port portdata_tbl[IDT_MAX_NR_PORTS] = {
102 { IDT_SW_NTP0_PCIECMDSTS, IDT_SW_NTP0_PCIELCTLSTS,
103 IDT_SW_NTP0_NTCTL,
104 IDT_SW_SWPORT0CTL, IDT_SW_SWPORT0STS,
105 { {IDT_SW_NTP0_BARSETUP0, IDT_SW_NTP0_BARLIMIT0,
106 IDT_SW_NTP0_BARLTBASE0, IDT_SW_NTP0_BARUTBASE0},
107 {IDT_SW_NTP0_BARSETUP1, IDT_SW_NTP0_BARLIMIT1,
108 IDT_SW_NTP0_BARLTBASE1, IDT_SW_NTP0_BARUTBASE1},
109 {IDT_SW_NTP0_BARSETUP2, IDT_SW_NTP0_BARLIMIT2,
110 IDT_SW_NTP0_BARLTBASE2, IDT_SW_NTP0_BARUTBASE2},
111 {IDT_SW_NTP0_BARSETUP3, IDT_SW_NTP0_BARLIMIT3,
112 IDT_SW_NTP0_BARLTBASE3, IDT_SW_NTP0_BARUTBASE3},
113 {IDT_SW_NTP0_BARSETUP4, IDT_SW_NTP0_BARLIMIT4,
114 IDT_SW_NTP0_BARLTBASE4, IDT_SW_NTP0_BARUTBASE4},
115 {IDT_SW_NTP0_BARSETUP5, IDT_SW_NTP0_BARLIMIT5,
116 IDT_SW_NTP0_BARLTBASE5, IDT_SW_NTP0_BARUTBASE5} } },
117 {0},
118 { IDT_SW_NTP2_PCIECMDSTS, IDT_SW_NTP2_PCIELCTLSTS,
119 IDT_SW_NTP2_NTCTL,
120 IDT_SW_SWPORT2CTL, IDT_SW_SWPORT2STS,
121 { {IDT_SW_NTP2_BARSETUP0, IDT_SW_NTP2_BARLIMIT0,
122 IDT_SW_NTP2_BARLTBASE0, IDT_SW_NTP2_BARUTBASE0},
123 {IDT_SW_NTP2_BARSETUP1, IDT_SW_NTP2_BARLIMIT1,
124 IDT_SW_NTP2_BARLTBASE1, IDT_SW_NTP2_BARUTBASE1},
125 {IDT_SW_NTP2_BARSETUP2, IDT_SW_NTP2_BARLIMIT2,
126 IDT_SW_NTP2_BARLTBASE2, IDT_SW_NTP2_BARUTBASE2},
127 {IDT_SW_NTP2_BARSETUP3, IDT_SW_NTP2_BARLIMIT3,
128 IDT_SW_NTP2_BARLTBASE3, IDT_SW_NTP2_BARUTBASE3},
129 {IDT_SW_NTP2_BARSETUP4, IDT_SW_NTP2_BARLIMIT4,
130 IDT_SW_NTP2_BARLTBASE4, IDT_SW_NTP2_BARUTBASE4},
131 {IDT_SW_NTP2_BARSETUP5, IDT_SW_NTP2_BARLIMIT5,
132 IDT_SW_NTP2_BARLTBASE5, IDT_SW_NTP2_BARUTBASE5} } },
133 {0},
134 { IDT_SW_NTP4_PCIECMDSTS, IDT_SW_NTP4_PCIELCTLSTS,
135 IDT_SW_NTP4_NTCTL,
136 IDT_SW_SWPORT4CTL, IDT_SW_SWPORT4STS,
137 { {IDT_SW_NTP4_BARSETUP0, IDT_SW_NTP4_BARLIMIT0,
138 IDT_SW_NTP4_BARLTBASE0, IDT_SW_NTP4_BARUTBASE0},
139 {IDT_SW_NTP4_BARSETUP1, IDT_SW_NTP4_BARLIMIT1,
140 IDT_SW_NTP4_BARLTBASE1, IDT_SW_NTP4_BARUTBASE1},
141 {IDT_SW_NTP4_BARSETUP2, IDT_SW_NTP4_BARLIMIT2,
142 IDT_SW_NTP4_BARLTBASE2, IDT_SW_NTP4_BARUTBASE2},
143 {IDT_SW_NTP4_BARSETUP3, IDT_SW_NTP4_BARLIMIT3,
144 IDT_SW_NTP4_BARLTBASE3, IDT_SW_NTP4_BARUTBASE3},
145 {IDT_SW_NTP4_BARSETUP4, IDT_SW_NTP4_BARLIMIT4,
146 IDT_SW_NTP4_BARLTBASE4, IDT_SW_NTP4_BARUTBASE4},
147 {IDT_SW_NTP4_BARSETUP5, IDT_SW_NTP4_BARLIMIT5,
148 IDT_SW_NTP4_BARLTBASE5, IDT_SW_NTP4_BARUTBASE5} } },
149 {0},
150 { IDT_SW_NTP6_PCIECMDSTS, IDT_SW_NTP6_PCIELCTLSTS,
151 IDT_SW_NTP6_NTCTL,
152 IDT_SW_SWPORT6CTL, IDT_SW_SWPORT6STS,
153 { {IDT_SW_NTP6_BARSETUP0, IDT_SW_NTP6_BARLIMIT0,
154 IDT_SW_NTP6_BARLTBASE0, IDT_SW_NTP6_BARUTBASE0},
155 {IDT_SW_NTP6_BARSETUP1, IDT_SW_NTP6_BARLIMIT1,
156 IDT_SW_NTP6_BARLTBASE1, IDT_SW_NTP6_BARUTBASE1},
157 {IDT_SW_NTP6_BARSETUP2, IDT_SW_NTP6_BARLIMIT2,
158 IDT_SW_NTP6_BARLTBASE2, IDT_SW_NTP6_BARUTBASE2},
159 {IDT_SW_NTP6_BARSETUP3, IDT_SW_NTP6_BARLIMIT3,
160 IDT_SW_NTP6_BARLTBASE3, IDT_SW_NTP6_BARUTBASE3},
161 {IDT_SW_NTP6_BARSETUP4, IDT_SW_NTP6_BARLIMIT4,
162 IDT_SW_NTP6_BARLTBASE4, IDT_SW_NTP6_BARUTBASE4},
163 {IDT_SW_NTP6_BARSETUP5, IDT_SW_NTP6_BARLIMIT5,
164 IDT_SW_NTP6_BARLTBASE5, IDT_SW_NTP6_BARUTBASE5} } },
165 {0},
166 { IDT_SW_NTP8_PCIECMDSTS, IDT_SW_NTP8_PCIELCTLSTS,
167 IDT_SW_NTP8_NTCTL,
168 IDT_SW_SWPORT8CTL, IDT_SW_SWPORT8STS,
169 { {IDT_SW_NTP8_BARSETUP0, IDT_SW_NTP8_BARLIMIT0,
170 IDT_SW_NTP8_BARLTBASE0, IDT_SW_NTP8_BARUTBASE0},
171 {IDT_SW_NTP8_BARSETUP1, IDT_SW_NTP8_BARLIMIT1,
172 IDT_SW_NTP8_BARLTBASE1, IDT_SW_NTP8_BARUTBASE1},
173 {IDT_SW_NTP8_BARSETUP2, IDT_SW_NTP8_BARLIMIT2,
174 IDT_SW_NTP8_BARLTBASE2, IDT_SW_NTP8_BARUTBASE2},
175 {IDT_SW_NTP8_BARSETUP3, IDT_SW_NTP8_BARLIMIT3,
176 IDT_SW_NTP8_BARLTBASE3, IDT_SW_NTP8_BARUTBASE3},
177 {IDT_SW_NTP8_BARSETUP4, IDT_SW_NTP8_BARLIMIT4,
178 IDT_SW_NTP8_BARLTBASE4, IDT_SW_NTP8_BARUTBASE4},
179 {IDT_SW_NTP8_BARSETUP5, IDT_SW_NTP8_BARLIMIT5,
180 IDT_SW_NTP8_BARLTBASE5, IDT_SW_NTP8_BARUTBASE5} } },
181 {0},
182 {0},
183 {0},
184 { IDT_SW_NTP12_PCIECMDSTS, IDT_SW_NTP12_PCIELCTLSTS,
185 IDT_SW_NTP12_NTCTL,
186 IDT_SW_SWPORT12CTL, IDT_SW_SWPORT12STS,
187 { {IDT_SW_NTP12_BARSETUP0, IDT_SW_NTP12_BARLIMIT0,
188 IDT_SW_NTP12_BARLTBASE0, IDT_SW_NTP12_BARUTBASE0},
189 {IDT_SW_NTP12_BARSETUP1, IDT_SW_NTP12_BARLIMIT1,
190 IDT_SW_NTP12_BARLTBASE1, IDT_SW_NTP12_BARUTBASE1},
191 {IDT_SW_NTP12_BARSETUP2, IDT_SW_NTP12_BARLIMIT2,
192 IDT_SW_NTP12_BARLTBASE2, IDT_SW_NTP12_BARUTBASE2},
193 {IDT_SW_NTP12_BARSETUP3, IDT_SW_NTP12_BARLIMIT3,
194 IDT_SW_NTP12_BARLTBASE3, IDT_SW_NTP12_BARUTBASE3},
195 {IDT_SW_NTP12_BARSETUP4, IDT_SW_NTP12_BARLIMIT4,
196 IDT_SW_NTP12_BARLTBASE4, IDT_SW_NTP12_BARUTBASE4},
197 {IDT_SW_NTP12_BARSETUP5, IDT_SW_NTP12_BARLIMIT5,
198 IDT_SW_NTP12_BARLTBASE5, IDT_SW_NTP12_BARUTBASE5} } },
199 {0},
200 {0},
201 {0},
202 { IDT_SW_NTP16_PCIECMDSTS, IDT_SW_NTP16_PCIELCTLSTS,
203 IDT_SW_NTP16_NTCTL,
204 IDT_SW_SWPORT16CTL, IDT_SW_SWPORT16STS,
205 { {IDT_SW_NTP16_BARSETUP0, IDT_SW_NTP16_BARLIMIT0,
206 IDT_SW_NTP16_BARLTBASE0, IDT_SW_NTP16_BARUTBASE0},
207 {IDT_SW_NTP16_BARSETUP1, IDT_SW_NTP16_BARLIMIT1,
208 IDT_SW_NTP16_BARLTBASE1, IDT_SW_NTP16_BARUTBASE1},
209 {IDT_SW_NTP16_BARSETUP2, IDT_SW_NTP16_BARLIMIT2,
210 IDT_SW_NTP16_BARLTBASE2, IDT_SW_NTP16_BARUTBASE2},
211 {IDT_SW_NTP16_BARSETUP3, IDT_SW_NTP16_BARLIMIT3,
212 IDT_SW_NTP16_BARLTBASE3, IDT_SW_NTP16_BARUTBASE3},
213 {IDT_SW_NTP16_BARSETUP4, IDT_SW_NTP16_BARLIMIT4,
214 IDT_SW_NTP16_BARLTBASE4, IDT_SW_NTP16_BARUTBASE4},
215 {IDT_SW_NTP16_BARSETUP5, IDT_SW_NTP16_BARLIMIT5,
216 IDT_SW_NTP16_BARLTBASE5, IDT_SW_NTP16_BARUTBASE5} } },
217 {0},
218 {0},
219 {0},
220 { IDT_SW_NTP20_PCIECMDSTS, IDT_SW_NTP20_PCIELCTLSTS,
221 IDT_SW_NTP20_NTCTL,
222 IDT_SW_SWPORT20CTL, IDT_SW_SWPORT20STS,
223 { {IDT_SW_NTP20_BARSETUP0, IDT_SW_NTP20_BARLIMIT0,
224 IDT_SW_NTP20_BARLTBASE0, IDT_SW_NTP20_BARUTBASE0},
225 {IDT_SW_NTP20_BARSETUP1, IDT_SW_NTP20_BARLIMIT1,
226 IDT_SW_NTP20_BARLTBASE1, IDT_SW_NTP20_BARUTBASE1},
227 {IDT_SW_NTP20_BARSETUP2, IDT_SW_NTP20_BARLIMIT2,
228 IDT_SW_NTP20_BARLTBASE2, IDT_SW_NTP20_BARUTBASE2},
229 {IDT_SW_NTP20_BARSETUP3, IDT_SW_NTP20_BARLIMIT3,
230 IDT_SW_NTP20_BARLTBASE3, IDT_SW_NTP20_BARUTBASE3},
231 {IDT_SW_NTP20_BARSETUP4, IDT_SW_NTP20_BARLIMIT4,
232 IDT_SW_NTP20_BARLTBASE4, IDT_SW_NTP20_BARUTBASE4},
233 {IDT_SW_NTP20_BARSETUP5, IDT_SW_NTP20_BARLIMIT5,
234 IDT_SW_NTP20_BARLTBASE5, IDT_SW_NTP20_BARUTBASE5} } },
235 {0},
236 {0},
237 {0}
238 };

/*
 * Per-partition registers: partition control/status and the four outbound
 * message routing (SWPxMSGCTL) registers.
 */
244 static const struct idt_ntb_part partdata_tbl[IDT_MAX_NR_PARTS] = {
245 { IDT_SW_SWPART0CTL, IDT_SW_SWPART0STS,
246 {IDT_SW_SWP0MSGCTL0, IDT_SW_SWP0MSGCTL1,
247 IDT_SW_SWP0MSGCTL2, IDT_SW_SWP0MSGCTL3} },
248 { IDT_SW_SWPART1CTL, IDT_SW_SWPART1STS,
249 {IDT_SW_SWP1MSGCTL0, IDT_SW_SWP1MSGCTL1,
250 IDT_SW_SWP1MSGCTL2, IDT_SW_SWP1MSGCTL3} },
251 { IDT_SW_SWPART2CTL, IDT_SW_SWPART2STS,
252 {IDT_SW_SWP2MSGCTL0, IDT_SW_SWP2MSGCTL1,
253 IDT_SW_SWP2MSGCTL2, IDT_SW_SWP2MSGCTL3} },
254 { IDT_SW_SWPART3CTL, IDT_SW_SWPART3STS,
255 {IDT_SW_SWP3MSGCTL0, IDT_SW_SWP3MSGCTL1,
256 IDT_SW_SWP3MSGCTL2, IDT_SW_SWP3MSGCTL3} },
257 { IDT_SW_SWPART4CTL, IDT_SW_SWPART4STS,
258 {IDT_SW_SWP4MSGCTL0, IDT_SW_SWP4MSGCTL1,
259 IDT_SW_SWP4MSGCTL2, IDT_SW_SWP4MSGCTL3} },
260 { IDT_SW_SWPART5CTL, IDT_SW_SWPART5STS,
261 {IDT_SW_SWP5MSGCTL0, IDT_SW_SWP5MSGCTL1,
262 IDT_SW_SWP5MSGCTL2, IDT_SW_SWP5MSGCTL3} },
263 { IDT_SW_SWPART6CTL, IDT_SW_SWPART6STS,
264 {IDT_SW_SWP6MSGCTL0, IDT_SW_SWP6MSGCTL1,
265 IDT_SW_SWP6MSGCTL2, IDT_SW_SWP6MSGCTL3} },
266 { IDT_SW_SWPART7CTL, IDT_SW_SWPART7STS,
267 {IDT_SW_SWP7MSGCTL0, IDT_SW_SWP7MSGCTL1,
268 IDT_SW_SWP7MSGCTL2, IDT_SW_SWP7MSGCTL3} }
269 };

/* DebugFS directory all the driver nodes are created in */
274 static struct dentry *dbgfs_topdir;

/*
 * IDT PCIe-switch register IO functions
 *
 * NT-function registers are reached directly through the BAR0 mapping, while
 * the global switch registers are accessed indirectly through the
 * GASAADDR/GASADATA gateway.
 */

/*
 * idt_nt_write() - write a value to an NT-function register
 * @ndev:	IDT NTB hardware driver descriptor
 * @reg:	register offset within the BAR0 mapping
 * @data:	value to be written
 */
299 static void idt_nt_write(struct idt_ntb_dev *ndev,
300 const unsigned int reg, const u32 data)
301 {
/*
 * Sanity-check the offset: it must lie within the NT-function register
 * space and be DWORD-aligned.
 */
306 if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
307 return;
308
309
310 iowrite32(data, ndev->cfgspc + (ptrdiff_t)reg);
311 }

/*
 * idt_nt_read() - read a value from an NT-function register
 * @ndev:	IDT NTB hardware driver descriptor
 * @reg:	register offset within the BAR0 mapping
 *
 * Return: the register value, or ~0 if the offset is invalid.
 */
322 static u32 idt_nt_read(struct idt_ntb_dev *ndev, const unsigned int reg)
323 {
/* Sanity-check the offset: within the NT register space and DWORD-aligned */
328 if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
329 return ~0;
330
331
332 return ioread32(ndev->cfgspc + (ptrdiff_t)reg);
333 }

/*
 * idt_sw_write() - write a value to a global switch register
 * @ndev:	IDT NTB hardware driver descriptor
 * @reg:	global register offset
 * @data:	value to be written
 *
 * Global registers are accessed indirectly through the GASAADDR/GASADATA
 * gateway, so the sequence is serialized by the gasa_lock spinlock.
 */
343 static void idt_sw_write(struct idt_ntb_dev *ndev,
344 const unsigned int reg, const u32 data)
345 {
346 unsigned long irqflags;

/* Sanity-check the offset: within the global register space and DWORD-aligned */
352 if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
353 return;
354
355
356 spin_lock_irqsave(&ndev->gasa_lock, irqflags);
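/*
 * Indirect access sequence (serialized by gasa_lock): post the global
 * register address to GASAADDR, then pass the value through GASADATA.
 */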
357
358 iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
359
360 iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
361
362 spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
363 }
364
/*
 * idt_sw_read() - read a value from a global switch register
 * @ndev:	IDT NTB hardware driver descriptor
 * @reg:	global register offset
 *
 * Return: the register value, or ~0 if the offset is invalid.
 */
374 static u32 idt_sw_read(struct idt_ntb_dev *ndev, const unsigned int reg)
375 {
376 unsigned long irqflags;
377 u32 data;

/* Sanity-check the offset: within the global register space and DWORD-aligned */
383 if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN)))
384 return ~0;
385
386
387 spin_lock_irqsave(&ndev->gasa_lock, irqflags);
388
389 iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR);
390
391 data = ioread32(ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA);
392
393 spin_unlock_irqrestore(&ndev->gasa_lock, irqflags);
394
395 return data;
396 }

/*
 * idt_reg_set_bits() - locked read-modify-write setting register bits
 * @ndev:	IDT NTB hardware driver descriptor
 * @reg:	NT-function register to update
 * @reg_lock:	spinlock protecting the register
 * @valid_mask:	bits which are permitted to be set
 * @set_bits:	bits to set
 *
 * Return: zero on success, -EINVAL if @set_bits fall outside @valid_mask.
 */
414 static inline int idt_reg_set_bits(struct idt_ntb_dev *ndev, unsigned int reg,
415 spinlock_t *reg_lock,
416 u64 valid_mask, u64 set_bits)
417 {
418 unsigned long irqflags;
419 u32 data;
420
421 if (set_bits & ~(u64)valid_mask)
422 return -EINVAL;
423
424
425 spin_lock_irqsave(reg_lock, irqflags);
426 data = idt_nt_read(ndev, reg) | (u32)set_bits;
427 idt_nt_write(ndev, reg, data);
428
429 spin_unlock_irqrestore(reg_lock, irqflags);
430
431 return 0;
432 }

/*
 * idt_reg_clear_bits() - locked read-modify-write clearing register bits
 * @ndev:	IDT NTB hardware driver descriptor
 * @reg:	NT-function register to update
 * @reg_lock:	spinlock protecting the register
 * @clear_bits:	bits to clear
 */
450 static inline void idt_reg_clear_bits(struct idt_ntb_dev *ndev,
451 unsigned int reg, spinlock_t *reg_lock,
452 u64 clear_bits)
453 {
454 unsigned long irqflags;
455 u32 data;
456
457
458 spin_lock_irqsave(reg_lock, irqflags);
459 data = idt_nt_read(ndev, reg) & ~(u32)clear_bits;
460 idt_nt_write(ndev, reg, data);
461
462 spin_unlock_irqrestore(reg_lock, irqflags);
463 }

/*
 * IDT PCIe-switch ports and partitions
 */

/*
 * idt_scan_ports() - detect the local port/partition and all NT peers
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * Walk the switch ports declared for this device, remember every port which
 * belongs to an active partition and is configured in one of the NT modes,
 * and build the port/partition to peer-index maps.
 *
 * Return: zero on success, -ENODEV if no active peer port was found.
 */
480 static int idt_scan_ports(struct idt_ntb_dev *ndev)
481 {
482 unsigned char pidx, port, part;
483 u32 data, portsts, partsts;
484
485
486 data = idt_nt_read(ndev, IDT_NT_PCIELCAP);
487 ndev->port = GET_FIELD(PCIELCAP_PORTNUM, data);
488
489
490 portsts = idt_sw_read(ndev, portdata_tbl[ndev->port].sts);
491 ndev->part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
492
493
494 memset(ndev->port_idx_map, -EINVAL, sizeof(ndev->port_idx_map));
495 memset(ndev->part_idx_map, -EINVAL, sizeof(ndev->part_idx_map));

/*
 * Scan the switch ports, skipping the local one, and register every port
 * which sits in an active partition and runs in an NT-capable mode as a peer.
 */
501 ndev->peer_cnt = 0;
502 for (pidx = 0; pidx < ndev->swcfg->port_cnt; pidx++) {
503 port = ndev->swcfg->ports[pidx];
504
505 if (port == ndev->port)
506 continue;
507
508
509 portsts = idt_sw_read(ndev, portdata_tbl[port].sts);
510 part = GET_FIELD(SWPORTxSTS_SWPART, portsts);
511
512
513 partsts = idt_sw_read(ndev, partdata_tbl[part].sts);
514
515 if (IS_FLD_SET(SWPARTxSTS_STATE, partsts, ACT) &&
516 (IS_FLD_SET(SWPORTxSTS_MODE, portsts, NT) ||
517 IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNT) ||
518 IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNTDMA) ||
519 IS_FLD_SET(SWPORTxSTS_MODE, portsts, NTDMA))) {
520
521 ndev->peers[ndev->peer_cnt].port = port;
522 ndev->peers[ndev->peer_cnt].part = part;
523
524 ndev->port_idx_map[port] = ndev->peer_cnt;
525 ndev->part_idx_map[part] = ndev->peer_cnt;
526 ndev->peer_cnt++;
527 }
528 }
529
530 dev_dbg(&ndev->ntb.pdev->dev, "Local port: %hhu, num of peers: %hhu\n",
531 ndev->port, ndev->peer_cnt);
532
533
534 if (ndev->peer_cnt == 0) {
535 dev_warn(&ndev->ntb.pdev->dev, "No active peer found\n");
536 return -ENODEV;
537 }
538
539 return 0;
540 }
541
/*
 * idt_ntb_port_number() - NTB API callback returning the local port number
 * @ntb:	NTB device context.
 */
548 static int idt_ntb_port_number(struct ntb_dev *ntb)
549 {
550 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
551
552 return ndev->port;
553 }
554
/*
 * idt_ntb_peer_port_count() - NTB API callback returning the number of peers
 * @ntb:	NTB device context.
 */
563 static int idt_ntb_peer_port_count(struct ntb_dev *ntb)
564 {
565 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
566
567 return ndev->peer_cnt;
568 }
569
/*
 * idt_ntb_peer_port_number() - NTB API callback translating a peer index to
 *				its port number
 * @ntb:	NTB device context.
 * @pidx:	Peer index.
 */
577 static int idt_ntb_peer_port_number(struct ntb_dev *ntb, int pidx)
578 {
579 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
580
581 if (pidx < 0 || ndev->peer_cnt <= pidx)
582 return -EINVAL;
583
584
585 return ndev->peers[pidx].port;
586 }
587
/*
 * idt_ntb_peer_port_idx() - NTB API callback translating a port number to
 *			     the corresponding peer index
 * @ntb:	NTB device context.
 * @port:	Port number.
 */
598 static int idt_ntb_peer_port_idx(struct ntb_dev *ntb, int port)
599 {
600 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
601
602 if (port < 0 || IDT_MAX_NR_PORTS <= port)
603 return -EINVAL;
604
605 return ndev->port_idx_map[port];
606 }
607
/*
 * NTB link events
 *
 * The NT link to a peer is considered up when both sides have bus mastering
 * and NT completion enabling turned on and a valid NT mapping-table entry.
 * Switch events (link up/down and global signal) are used to notify the
 * peers about changes.
 */

/* Forward declaration used by idt_deinit_link() */
621 static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev);
622
/*
 * idt_init_link() - initialize the NTB link state events
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * Clear the stale switch-event substatuses and unmask the link-up, link-down
 * and global-signal events of the ports and partitions this device talks to.
 */
635 static void idt_init_link(struct idt_ntb_dev *ndev)
636 {
637 u32 part_mask, port_mask, se_mask;
638 unsigned char pidx;
639
640
641 spin_lock_init(&ndev->mtbl_lock);
642
643
644 port_mask = ~BIT(ndev->port);
645 part_mask = ~BIT(ndev->part);
646 for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
647 port_mask &= ~BIT(ndev->peers[pidx].port);
648 part_mask &= ~BIT(ndev->peers[pidx].part);
649 }
650
651
652 idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
653 idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
654 idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
655
656
657 idt_sw_write(ndev, IDT_SW_SEPMSK, part_mask);
658
659
660 idt_sw_write(ndev, IDT_SW_SELINKUPMSK, port_mask);
661
662
663 idt_sw_write(ndev, IDT_SW_SELINKDNMSK, port_mask);
664
665
666 idt_sw_write(ndev, IDT_SW_SEGSIGMSK, part_mask);
667
668
669 se_mask = ~(IDT_SEMSK_LINKUP | IDT_SEMSK_LINKDN | IDT_SEMSK_GSIGNAL);
670 idt_sw_write(ndev, IDT_SW_SEMSK, se_mask);
671
672 dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events initialized");
673 }
674
675
676
677
678
679
680
681 static void idt_deinit_link(struct idt_ntb_dev *ndev)
682 {
683
684 idt_ntb_local_link_disable(ndev);
685
686 dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events deinitialized");
687 }
688
/*
 * idt_se_isr() - switch event ISR
 * @ndev:	IDT NTB hardware driver descriptor
 * @ntint_sts:	NT-function interrupt status
 *
 * Clear the link-up/link-down/global-signal substatuses and the switch-event
 * interrupt bit, then notify the NTB clients of a possible link change.
 */
699 static void idt_se_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
700 {
701 u32 sests;
702
703
704 sests = idt_sw_read(ndev, IDT_SW_SESTS);
705
706
707 idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1);
708 idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1);
709 idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1);
710
711
712 idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_SEVENT);
713
714 dev_dbg(&ndev->ntb.pdev->dev, "SE IRQ detected %#08x (SESTS %#08x)",
715 ntint_sts, sests);
716
717
718 ntb_link_event(&ndev->ntb);
719 }
720
/*
 * idt_ntb_local_link_enable() - bring the local side of the NT link up
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * Enable NT completions, write a valid mapping-table entry with the captured
 * requester ID for the local partition and raise the global signal so the
 * peers notice the change.
 */
730 static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev)
731 {
732 u32 reqid, mtbldata = 0;
733 unsigned long irqflags;
734
735
736 idt_nt_write(ndev, IDT_NT_NTCTL, IDT_NTCTL_CPEN);
737
738
739 reqid = idt_nt_read(ndev, IDT_NT_REQIDCAP);
740
741
742
743
744
745 mtbldata = SET_FIELD(NTMTBLDATA_REQID, 0, reqid) |
746 SET_FIELD(NTMTBLDATA_PART, 0, ndev->part) |
747 IDT_NTMTBLDATA_VALID;
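/*
 * Update the local partition entry of the NT mapping table under mtbl_lock:
 * NTMTBLADDR selects the entry, NTMTBLDATA carries the new data.
 */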
748 spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
749 idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
750 idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata);
751 spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
752
753
754 idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
755 idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
756 }
757
/*
 * idt_ntb_local_link_disable() - bring the local side of the NT link down
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * Disable NT completions, invalidate the local mapping-table entry and raise
 * the global signal so the peers notice the change.
 */
767 static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev)
768 {
769 unsigned long irqflags;
770
771
772 idt_nt_write(ndev, IDT_NT_NTCTL, 0);
773
774
775 spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
776 idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
777 idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0);
778 spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
779
780
781 idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET);
782 idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part);
783 }
784
/*
 * idt_ntb_local_link_is_up() - test whether the local side of the link is up
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * The local side is up when bus mastering is enabled, NT completions are
 * enabled and the local mapping-table entry is valid.
 */
798 static bool idt_ntb_local_link_is_up(struct idt_ntb_dev *ndev)
799 {
800 unsigned long irqflags;
801 u32 data;
802
803
804 data = idt_nt_read(ndev, IDT_NT_PCICMDSTS);
805 if (!(data & IDT_PCICMDSTS_BME))
806 return false;
807
808
809 data = idt_nt_read(ndev, IDT_NT_NTCTL);
810 if (!(data & IDT_NTCTL_CPEN))
811 return false;
812
813
814 spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
815 idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part);
816 data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
817 spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
818
819 return !!(data & IDT_NTMTBLDATA_VALID);
820 }
821
/*
 * idt_ntb_peer_link_is_up() - test whether the link to a peer is up
 * @ndev:	IDT NTB hardware driver descriptor
 * @pidx:	Peer index
 *
 * The peer side is up when its PCIe link is up, bus mastering and NT
 * completions are enabled and its mapping-table entry is valid.
 */
835 static bool idt_ntb_peer_link_is_up(struct idt_ntb_dev *ndev, int pidx)
836 {
837 unsigned long irqflags;
838 unsigned char port;
839 u32 data;
840
841
842 port = ndev->peers[pidx].port;
843
844
845 data = idt_sw_read(ndev, portdata_tbl[port].sts);
846 if (!(data & IDT_SWPORTxSTS_LINKUP))
847 return false;
848
849
850 data = idt_sw_read(ndev, portdata_tbl[port].pcicmdsts);
851 if (!(data & IDT_PCICMDSTS_BME))
852 return false;
853
854
855 data = idt_sw_read(ndev, portdata_tbl[port].ntctl);
856 if (!(data & IDT_NTCTL_CPEN))
857 return false;
858
859
860 spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
861 idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->peers[pidx].part);
862 data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
863 spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
864
865 return !!(data & IDT_NTMTBLDATA_VALID);
866 }
867
/*
 * idt_ntb_link_is_up() - NTB API callback reporting the NT link state
 * @ntb:	NTB device context.
 * @speed:	Optional output for the negotiated PCIe link speed.
 * @width:	Optional output for the negotiated PCIe link width.
 *
 * Return: a bitfield with a bit set for every peer whose link is up, or zero
 * if the local side itself is down.
 */
879 static u64 idt_ntb_link_is_up(struct ntb_dev *ntb,
880 enum ntb_speed *speed, enum ntb_width *width)
881 {
882 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
883 unsigned char pidx;
884 u64 status;
885 u32 data;
886
887
888 if (speed != NULL || width != NULL) {
889 data = idt_nt_read(ndev, IDT_NT_PCIELCTLSTS);
890 if (speed != NULL)
891 *speed = GET_FIELD(PCIELCTLSTS_CLS, data);
892 if (width != NULL)
893 *width = GET_FIELD(PCIELCTLSTS_NLW, data);
894 }
895
896
897 if (!idt_ntb_local_link_is_up(ndev))
898 return 0;
899
900
901 status = 0;
902 for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
903 if (idt_ntb_peer_link_is_up(ndev, pidx))
904 status |= ((u64)1 << pidx);
905 }
906
907 return status;
908 }
909
/*
 * idt_ntb_link_enable() - NTB API callback enabling the local NT link
 * @ntb:	NTB device context.
 * @speed:	Requested speed (ignored by this driver).
 * @width:	Requested width (ignored by this driver).
 */
920 static int idt_ntb_link_enable(struct ntb_dev *ntb, enum ntb_speed speed,
921 enum ntb_width width)
922 {
923 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
924
925
926 idt_ntb_local_link_enable(ndev);
927
928 dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link enabled");
929
930 return 0;
931 }
932
/*
 * idt_ntb_link_disable() - NTB API callback disabling the local NT link
 * @ntb:	NTB device context.
 */
941 static int idt_ntb_link_disable(struct ntb_dev *ntb)
942 {
943 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
944
945
946 idt_ntb_local_link_disable(ndev);
947
948 dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link disabled");
949
950 return 0;
951 }
952
/*
 * Memory Window operations
 *
 * Each enabled BAR of an NT function provides either one directly translated
 * memory window or a set of look-up-table (LUT) based windows. The BARs are
 * scanned at probe time to build window descriptors for the local port
 * (outbound windows) and for every peer port (inbound windows).
 */

/*
 * idt_get_mw_count() - number of memory windows a BAR provides
 * @mw_type:	BAR address translation type
 *
 * Return: 1 for a direct window, 12 or 24 for the LUT-based types, 0 for an
 * unknown type.
 */
992 static inline unsigned char idt_get_mw_count(enum idt_mw_type mw_type)
993 {
994 switch (mw_type) {
995 case IDT_MW_DIR:
996 return 1;
997 case IDT_MW_LUT12:
998 return 12;
999 case IDT_MW_LUT24:
1000 return 24;
1001 default:
1002 break;
1003 }
1004
1005 return 0;
1006 }
1007
/*
 * idt_get_mw_name() - printable name of a memory window type
 * @mw_type:	BAR address translation type
 */
1014 static inline char *idt_get_mw_name(enum idt_mw_type mw_type)
1015 {
1016 switch (mw_type) {
1017 case IDT_MW_DIR:
1018 return "DIR ";
1019 case IDT_MW_LUT12:
1020 return "LUT12";
1021 case IDT_MW_LUT24:
1022 return "LUT24";
1023 default:
1024 break;
1025 }
1026
1027 return "unknown";
1028 }
1029
/*
 * idt_scan_mws() - scan the memory windows of a switch port
 * @ndev:	IDT NTB hardware driver descriptor
 * @port:	Port to scan
 * @mw_cnt:	Output for the number of windows found
 *
 * Walk the six BARs of the port, skipping disabled ones and the BAR mapping
 * the configuration space, and build an array of window descriptors holding
 * their type, BAR index, LUT entry index, alignment and maximum size.
 *
 * Return: the allocated descriptor array or an ERR_PTR() value.
 */
1041 static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
1042 unsigned char *mw_cnt)
1043 {
1044 struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws;
1045 const struct idt_ntb_bar *bars;
1046 enum idt_mw_type mw_type;
1047 unsigned char widx, bidx, en_cnt;
1048 bool bar_64bit = false;
1049 int aprt_size;
1050 u32 data;
1051
1052
1053 bars = portdata_tbl[port].bars;
1054
1055
1056 *mw_cnt = 0;
1057 for (bidx = 0; bidx < IDT_BAR_CNT; bidx += 1 + bar_64bit) {
1058
1059 data = idt_sw_read(ndev, bars[bidx].setup);
1060
1061
1062 if (!(data & IDT_BARSETUP_EN)) {
1063 bar_64bit = false;
1064 continue;
1065 }
1066
1067
1068 bar_64bit = IS_FLD_SET(BARSETUP_TYPE, data, 64);
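/* A 64-bit BAR occupies two BAR slots; the loop increment (1 + bar_64bit) skips the upper half */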
1069
1070
1071 if (data & IDT_BARSETUP_MODE_CFG)
1072 continue;
1073
1074
1075 mw_type = GET_FIELD(BARSETUP_ATRAN, data);
1076 en_cnt = idt_get_mw_count(mw_type);
1077 aprt_size = (u64)1 << GET_FIELD(BARSETUP_SIZE, data);
1078
1079
1080 for (widx = 0; widx < en_cnt; widx++, (*mw_cnt)++) {
/* Make sure the device doesn't expose more windows than the driver supports */
1085 if (*mw_cnt >= IDT_MAX_NR_MWS)
1086 return ERR_PTR(-EINVAL);
1087
1088
1089 mws[*mw_cnt].type = mw_type;
1090 mws[*mw_cnt].bar = bidx;
1091 mws[*mw_cnt].idx = widx;
1092
1093 mws[*mw_cnt].addr_align = IDT_TRANS_ALIGN;
1094
1095 if (mw_type == IDT_MW_DIR)
1096 mws[*mw_cnt].size_max = aprt_size;
1097 else if (mw_type == IDT_MW_LUT12)
1098 mws[*mw_cnt].size_max = aprt_size / 16;
1099 else
1100 mws[*mw_cnt].size_max = aprt_size / 32;
1101 mws[*mw_cnt].size_align = (mw_type == IDT_MW_DIR) ?
1102 IDT_DIR_SIZE_ALIGN : mws[*mw_cnt].size_max;
1103 }
1104 }
1105
1106
1107 ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
1108 GFP_KERNEL);
1109 if (!ret_mws)
1110 return ERR_PTR(-ENOMEM);
1111
1112
1113 memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws));
1114
1115 return ret_mws;
1116 }
1117
/*
 * idt_init_mws() - initialize the outbound and inbound memory windows
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * Scan the local port windows and the windows of every peer port, then
 * initialize the LUT access spinlock.
 */
1127 static int idt_init_mws(struct idt_ntb_dev *ndev)
1128 {
1129 struct idt_ntb_peer *peer;
1130 unsigned char pidx;
1131
1132
1133 ndev->mws = idt_scan_mws(ndev, ndev->port, &ndev->mw_cnt);
1134 if (IS_ERR(ndev->mws)) {
1135 dev_err(&ndev->ntb.pdev->dev,
1136 "Failed to scan mws of local port %hhu", ndev->port);
1137 return PTR_ERR(ndev->mws);
1138 }
1139
1140
1141 for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
1142 peer = &ndev->peers[pidx];
1143 peer->mws = idt_scan_mws(ndev, peer->port, &peer->mw_cnt);
1144 if (IS_ERR(peer->mws)) {
1145 dev_err(&ndev->ntb.pdev->dev,
1146 "Failed to scan mws of port %hhu", peer->port);
1147 return PTR_ERR(peer->mws);
1148 }
1149 }
1150
1151
1152 spin_lock_init(&ndev->lut_lock);
1153
1154 dev_dbg(&ndev->ntb.pdev->dev, "Outbound and inbound MWs initialized");
1155
1156 return 0;
1157 }
1158
/*
 * idt_ntb_mw_count() - NTB API callback returning the number of memory
 *			windows of a peer
 * @ntb:	NTB device context.
 * @pidx:	Peer index.
 */
1170 static int idt_ntb_mw_count(struct ntb_dev *ntb, int pidx)
1171 {
1172 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1173
1174 if (pidx < 0 || ndev->peer_cnt <= pidx)
1175 return -EINVAL;
1176
1177 return ndev->peers[pidx].mw_cnt;
1178 }
1179
/*
 * idt_ntb_mw_get_align() - NTB API callback reporting the address/size
 *			    constraints of a peer memory window
 * @ntb:	NTB device context.
 * @pidx:	Peer index.
 * @widx:	Window index.
 * @addr_align:	Optional output for the translation address alignment.
 * @size_align:	Optional output for the window size alignment.
 * @size_max:	Optional output for the maximum window size.
 */
1194 static int idt_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
1195 resource_size_t *addr_align,
1196 resource_size_t *size_align,
1197 resource_size_t *size_max)
1198 {
1199 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1200 struct idt_ntb_peer *peer;
1201
1202 if (pidx < 0 || ndev->peer_cnt <= pidx)
1203 return -EINVAL;
1204
1205 peer = &ndev->peers[pidx];
1206
1207 if (widx < 0 || peer->mw_cnt <= widx)
1208 return -EINVAL;
1209
1210 if (addr_align != NULL)
1211 *addr_align = peer->mws[widx].addr_align;
1212
1213 if (size_align != NULL)
1214 *size_align = peer->mws[widx].size_align;
1215
1216 if (size_max != NULL)
1217 *size_max = peer->mws[widx].size_max;
1218
1219 return 0;
1220 }
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232 static int idt_ntb_peer_mw_count(struct ntb_dev *ntb)
1233 {
1234 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1235
1236 return ndev->mw_cnt;
1237 }
1238
/*
 * idt_ntb_peer_mw_get_addr() - NTB API callback returning the local physical
 *				address and size of an outbound window
 * @ntb:	NTB device context.
 * @widx:	Window index.
 * @base:	Optional output for the window physical base address.
 * @size:	Optional output for the window size.
 */
1252 static int idt_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
1253 phys_addr_t *base, resource_size_t *size)
1254 {
1255 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1256
1257 if (widx < 0 || ndev->mw_cnt <= widx)
1258 return -EINVAL;
1259
1260
1261 if (base != NULL)
1262 *base = pci_resource_start(ntb->pdev, ndev->mws[widx].bar) +
1263 ndev->mws[widx].idx * ndev->mws[widx].size_max;
1264
1265
1266 if (size != NULL)
1267 *size = ndev->mws[widx].size_max;
1268
1269 return 0;
1270 }
1271
/*
 * idt_ntb_peer_mw_set_trans() - NTB API callback setting the translation of
 *				 an outbound memory window
 * @ntb:	NTB device context.
 * @pidx:	Peer the window shall be directed to.
 * @widx:	Window index.
 * @addr:	Peer-side address the window shall translate to.
 * @size:	Size of the translated region.
 *
 * Direct windows are programmed through the BAR setup/limit/base registers,
 * LUT windows through the corresponding look-up-table entry.
 */
1287 static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
1288 u64 addr, resource_size_t size)
1289 {
1290 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1291 struct idt_mw_cfg *mw_cfg;
1292 u32 data = 0, lutoff = 0;
1293
1294 if (pidx < 0 || ndev->peer_cnt <= pidx)
1295 return -EINVAL;
1296
1297 if (widx < 0 || ndev->mw_cnt <= widx)
1298 return -EINVAL;
1299
/* The translation address and size must fit the window constraints */
1304 mw_cfg = &ndev->mws[widx];
1305 if (!IS_ALIGNED(addr, mw_cfg->addr_align))
1306 return -EINVAL;
1307 if (!IS_ALIGNED(size, mw_cfg->size_align) || size > mw_cfg->size_max)
1308 return -EINVAL;
1309
1310
1311 if (mw_cfg->type == IDT_MW_DIR) {
1312 const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
1313 u64 limit;
1314
1315 data = idt_nt_read(ndev, bar->setup);
1316 data = SET_FIELD(BARSETUP_TPART, data, ndev->peers[pidx].part);
1317 idt_nt_write(ndev, bar->setup, data);
1318
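/*
 * Program the translated base split across the lower/upper base registers,
 * then set the limit register to the window bus address plus the requested
 * size.
 */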
1319 idt_nt_write(ndev, bar->ltbase, (u32)addr);
1320 idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32));
1321
1322 limit = pci_bus_address(ntb->pdev, mw_cfg->bar) + size;
1323 idt_nt_write(ndev, bar->limit, (u32)limit);
1324 if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
1325 idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32));
1326 } else {
1327 unsigned long irqflags;
1328
1329 lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
1330 SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
1331 data = SET_FIELD(LUTUDATA_PART, 0, ndev->peers[pidx].part) |
1332 IDT_LUTUDATA_VALID;
1333 spin_lock_irqsave(&ndev->lut_lock, irqflags);
1334 idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
1335 idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
1336 idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));
1337 idt_nt_write(ndev, IDT_NT_LUTUDATA, data);
1338 spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
1339
1340 }
1341
1342 return 0;
1343 }
1344
/*
 * idt_ntb_peer_mw_clear_trans() - NTB API callback clearing the translation
 *				   of an outbound memory window
 * @ntb:	NTB device context.
 * @pidx:	Peer index.
 * @widx:	Window index.
 */
1356 static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
1357 int widx)
1358 {
1359 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1360 struct idt_mw_cfg *mw_cfg;
1361
1362 if (pidx < 0 || ndev->peer_cnt <= pidx)
1363 return -EINVAL;
1364
1365 if (widx < 0 || ndev->mw_cnt <= widx)
1366 return -EINVAL;
1367
1368 mw_cfg = &ndev->mws[widx];
1369
1370
1371 if (mw_cfg->type == IDT_MW_DIR) {
1372 const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
1373 u32 data;
1374
1375 data = idt_nt_read(ndev, bar->setup);
1376
1377 idt_nt_write(ndev, bar->limit, 0);
1378 if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
1379 idt_nt_write(ndev, (bar + 1)->limit, 0);
1380 } else {
1381 unsigned long irqflags;
1382 u32 lutoff;
1383
1384 lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
1385 SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
1386 spin_lock_irqsave(&ndev->lut_lock, irqflags);
1387 idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
1388 idt_nt_write(ndev, IDT_NT_LUTLDATA, 0);
1389 idt_nt_write(ndev, IDT_NT_LUTMDATA, 0);
1390 idt_nt_write(ndev, IDT_NT_LUTUDATA, 0);
1391 spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
1392 }
1393
1394 return 0;
1395 }
1396
/*
 * Doorbell operations
 *
 * The IDT PCIe-switch shares a single 32-bit global doorbell register
 * between the partitions, so the valid doorbell bits are limited to
 * IDT_DBELL_MASK.
 */

/*
 * idt_db_isr() - doorbell event ISR
 * @ndev:	IDT NTB hardware driver descriptor
 * @ntint_sts:	NT-function interrupt status
 *
 * The doorbell status is left intact here; NTB clients are expected to read
 * and clear it through the doorbell API.
 */
1426 static void idt_db_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
1427 {
1428
1429
1430
1431
1432 dev_dbg(&ndev->ntb.pdev->dev, "DB IRQ detected %#08x", ntint_sts);
1433
1434
1435 ntb_db_event(&ndev->ntb, 0);
1436 }
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447 static u64 idt_ntb_db_valid_mask(struct ntb_dev *ntb)
1448 {
1449 return IDT_DBELL_MASK;
1450 }
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461 static u64 idt_ntb_db_read(struct ntb_dev *ntb)
1462 {
1463 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1464
1465 return idt_nt_read(ndev, IDT_NT_INDBELLSTS);
1466 }
1467
/*
 * idt_ntb_db_clear() - NTB API callback clearing doorbell bits
 * @ntb:	NTB device context.
 * @db_bits:	Doorbell bits to clear.
 */
1481 static int idt_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
1482 {
1483 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1484
1485 idt_nt_write(ndev, IDT_NT_INDBELLSTS, (u32)db_bits);
1486
1487 return 0;
1488 }
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500 static u64 idt_ntb_db_read_mask(struct ntb_dev *ntb)
1501 {
1502 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1503
1504 return idt_nt_read(ndev, IDT_NT_INDBELLMSK);
1505 }
1506
/*
 * idt_ntb_db_set_mask() - NTB API callback masking doorbell interrupts
 * @ntb:	NTB device context.
 * @db_bits:	Doorbell bits to mask.
 */
1518 static int idt_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1519 {
1520 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1521
1522 return idt_reg_set_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
1523 IDT_DBELL_MASK, db_bits);
1524 }
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539 static int idt_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1540 {
1541 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1542
1543 idt_reg_clear_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
1544 db_bits);
1545
1546 return 0;
1547 }
1548
/*
 * idt_ntb_peer_db_set() - NTB API callback ringing the peer doorbell
 * @ntb:	NTB device context.
 * @db_bits:	Doorbell bits to set on the peer side.
 */
1560 static int idt_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1561 {
1562 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1563
1564 if (db_bits & ~(u64)IDT_DBELL_MASK)
1565 return -EINVAL;
1566
1567 idt_nt_write(ndev, IDT_NT_OUTDBELLSET, (u32)db_bits);
1568 return 0;
1569 }
1570
/*
 * Messaging operations
 *
 * The NT function exposes four message registers. An outbound message is
 * first routed to the target partition through the per-partition SWPxMSGCTL
 * register and then written to the outbound message register, so the two
 * steps are serialized by a per-register spinlock.
 */

/*
 * idt_init_msg() - initialize the messaging interface
 * @ndev:	IDT NTB hardware driver descriptor
 */
1588 static void idt_init_msg(struct idt_ntb_dev *ndev)
1589 {
1590 unsigned char midx;
1591
1592
1593 for (midx = 0; midx < IDT_MSG_CNT; midx++)
1594 spin_lock_init(&ndev->msg_locks[midx]);
1595
1596 dev_dbg(&ndev->ntb.pdev->dev, "NTB Messaging initialized");
1597 }
1598
/*
 * idt_msg_isr() - message event ISR
 * @ndev:	IDT NTB hardware driver descriptor
 * @ntint_sts:	NT-function interrupt status
 *
 * The message status is left intact here; NTB clients are expected to read
 * and clear it through the messaging API.
 */
1609 static void idt_msg_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
1610 {
1611
1612
1613
1614
1615 dev_dbg(&ndev->ntb.pdev->dev, "Message IRQ detected %#08x", ntint_sts);
1616
1617
1618 ntb_msg_event(&ndev->ntb);
1619 }
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629 static int idt_ntb_msg_count(struct ntb_dev *ntb)
1630 {
1631 return IDT_MSG_CNT;
1632 }
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644 static u64 idt_ntb_msg_inbits(struct ntb_dev *ntb)
1645 {
1646 return (u64)IDT_INMSG_MASK;
1647 }
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659 static u64 idt_ntb_msg_outbits(struct ntb_dev *ntb)
1660 {
1661 return (u64)IDT_OUTMSG_MASK;
1662 }
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673 static u64 idt_ntb_msg_read_sts(struct ntb_dev *ntb)
1674 {
1675 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1676
1677 return idt_nt_read(ndev, IDT_NT_MSGSTS);
1678 }
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693 static int idt_ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
1694 {
1695 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1696
1697 idt_nt_write(ndev, IDT_NT_MSGSTS, sts_bits);
1698
1699 return 0;
1700 }
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712 static int idt_ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
1713 {
1714 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1715
1716 return idt_reg_set_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
1717 IDT_MSG_MASK, mask_bits);
1718 }
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730 static int idt_ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
1731 {
1732 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1733
1734 idt_reg_clear_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
1735 mask_bits);
1736
1737 return 0;
1738 }
1739
/*
 * idt_ntb_msg_read() - NTB API callback reading an inbound message
 * @ntb:	NTB device context.
 * @pidx:	Optional output for the index of the sending peer.
 * @midx:	Message register index.
 *
 * Return: the inbound message data, or ~0 for an invalid register index.
 */
1751 static u32 idt_ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
1752 {
1753 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1754
1755 if (midx < 0 || IDT_MSG_CNT <= midx)
1756 return ~(u32)0;
1757
1758
1759 if (pidx != NULL) {
1760 u32 srcpart;
1761
1762 srcpart = idt_nt_read(ndev, ntdata_tbl.msgs[midx].src);
1763 *pidx = ndev->part_idx_map[srcpart];
1764
1765
1766 if (*pidx == -EINVAL)
1767 *pidx = 0;
1768 }
1769
1770
1771 return idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
1772 }
1773
/*
 * idt_ntb_peer_msg_write() - NTB API callback writing an outbound message
 * @ntb:	NTB device context.
 * @pidx:	Peer the message shall be delivered to.
 * @midx:	Message register index.
 * @msg:	Message data.
 */
1787 static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
1788 u32 msg)
1789 {
1790 struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
1791 unsigned long irqflags;
1792 u32 swpmsgctl = 0;
1793
1794 if (midx < 0 || IDT_MSG_CNT <= midx)
1795 return -EINVAL;
1796
1797 if (pidx < 0 || ndev->peer_cnt <= pidx)
1798 return -EINVAL;
1799
1800
1801 swpmsgctl = SET_FIELD(SWPxMSGCTL_REG, 0, midx) |
1802 SET_FIELD(SWPxMSGCTL_PART, 0, ndev->peers[pidx].part);
1803
1804
1805 spin_lock_irqsave(&ndev->msg_locks[midx], irqflags);
1806
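/*
 * Route outbound message register midx to the peer partition, then post the
 * message data (the sequence is serialized by the per-register msg lock).
 */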
1807 idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
1808 idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
1809
1810 spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
1811
1812
1813 return 0;
1814 }
/*
 * Temperature sensor operations
 *
 * The switch reports temperatures in a packed byte format: the (signed)
 * degrees value occupies bits 7:1 and bit 0 flags an extra half degree.
 * The helpers below convert between that format and millidegrees Celsius,
 * and the readings are exposed through a hwmon sysfs interface.
 */

/*
 * idt_get_deg() - whole degrees Celsius of a millidegree value
 * @mdegC:	Temperature in millidegrees.
 */
1834 static inline s8 idt_get_deg(long mdegC)
1835 {
1836 return mdegC / 1000;
1837 }
1838
1839
1840
1841
1842
1843
1844
1845 static inline u8 idt_get_deg_frac(long mdegC)
1846 {
1847 return (mdegC % 1000) >= 500 ? 5 : 0;
1848 }
1849
1850
1851
1852
1853
1854
1855
1856 static inline u8 idt_temp_get_fmt(long mdegC)
1857 {
1858 return (idt_get_deg(mdegC) << 1) | (idt_get_deg_frac(mdegC) ? 1 : 0);
1859 }
1860
1861
1862
1863
1864
1865
1866
1867 static inline long idt_get_temp_sval(u32 data)
1868 {
1869 return ((s8)data / 2) * 1000 + (data & 0x1 ? 500 : 0);
1870 }
1871
1872
1873
1874
1875
1876
1877
1878 static inline long idt_get_temp_uval(u32 data)
1879 {
1880 return (data / 2) * 1000 + (data & 0x1 ? 500 : 0);
1881 }

/*
 * idt_read_temp() - read a temperature value from the switch
 * @ndev:	IDT NTB hardware driver descriptor
 * @type:	Which value to read (current, lowest, highest or offset)
 * @val:	Output value in millidegrees (signed for the offset)
 */
1889 static void idt_read_temp(struct idt_ntb_dev *ndev,
1890 const enum idt_temp_val type, long *val)
1891 {
1892 u32 data;
1893
1894
1895 switch (type) {
1896 case IDT_TEMP_CUR:
1897 data = GET_FIELD(TMPSTS_TEMP,
1898 idt_sw_read(ndev, IDT_SW_TMPSTS));
1899 break;
1900 case IDT_TEMP_LOW:
1901 data = GET_FIELD(TMPSTS_LTEMP,
1902 idt_sw_read(ndev, IDT_SW_TMPSTS));
1903 break;
1904 case IDT_TEMP_HIGH:
1905 data = GET_FIELD(TMPSTS_HTEMP,
1906 idt_sw_read(ndev, IDT_SW_TMPSTS));
1907 break;
1908 case IDT_TEMP_OFFSET:
1909
1910 data = GET_FIELD(TMPADJ_OFFSET,
1911 idt_sw_read(ndev, IDT_SW_TMPADJ));
1912 *val = idt_get_temp_sval(data);
1913 return;
1914 default:
1915 data = GET_FIELD(TMPSTS_TEMP,
1916 idt_sw_read(ndev, IDT_SW_TMPSTS));
1917 break;
1918 }
1919
1920
1921 *val = idt_get_temp_uval(data);
1922 }

/*
 * idt_write_temp() - write a temperature threshold or offset to the switch
 * @ndev:	IDT NTB hardware driver descriptor
 * @type:	Which value to update (low/high alarm threshold or offset)
 * @val:	New value in millidegrees
 */
1930 static void idt_write_temp(struct idt_ntb_dev *ndev,
1931 const enum idt_temp_val type, const long val)
1932 {
1933 unsigned int reg;
1934 u32 data;
1935 u8 fmt;
1936
1937
1938 fmt = idt_temp_get_fmt(val);
1939
1940 mutex_lock(&ndev->hwmon_mtx);
1941 switch (type) {
1942 case IDT_TEMP_LOW:
1943 reg = IDT_SW_TMPALARM;
1944 data = SET_FIELD(TMPALARM_LTEMP, idt_sw_read(ndev, reg), fmt) &
1945 ~IDT_TMPALARM_IRQ_MASK;
1946 break;
1947 case IDT_TEMP_HIGH:
1948 reg = IDT_SW_TMPALARM;
1949 data = SET_FIELD(TMPALARM_HTEMP, idt_sw_read(ndev, reg), fmt) &
1950 ~IDT_TMPALARM_IRQ_MASK;
1951 break;
1952 case IDT_TEMP_OFFSET:
1953 reg = IDT_SW_TMPADJ;
1954 data = SET_FIELD(TMPADJ_OFFSET, idt_sw_read(ndev, reg), fmt);
1955 break;
1956 default:
1957 goto inval_spin_unlock;
1958 }
1959
1960 idt_sw_write(ndev, reg, data);
1961
1962 inval_spin_unlock:
1963 mutex_unlock(&ndev->hwmon_mtx);
1964 }
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974 static ssize_t idt_sysfs_show_temp(struct device *dev,
1975 struct device_attribute *da, char *buf)
1976 {
1977 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
1978 struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
1979 enum idt_temp_val type = attr->index;
1980 long mdeg;
1981
1982 idt_read_temp(ndev, type, &mdeg);
1983 return sprintf(buf, "%ld\n", mdeg);
1984 }
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995 static ssize_t idt_sysfs_set_temp(struct device *dev,
1996 struct device_attribute *da, const char *buf,
1997 size_t count)
1998 {
1999 struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
2000 struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
2001 enum idt_temp_val type = attr->index;
2002 long mdeg;
2003 int ret;
2004
2005 ret = kstrtol(buf, 10, &mdeg);
2006 if (ret)
2007 return ret;
2008
2009
2010 if (type == IDT_TEMP_OFFSET)
2011 mdeg = clamp_val(mdeg, IDT_TEMP_MIN_OFFSET,
2012 IDT_TEMP_MAX_OFFSET);
2013 else
2014 mdeg = clamp_val(mdeg, IDT_TEMP_MIN_MDEG, IDT_TEMP_MAX_MDEG);
2015
2016 idt_write_temp(ndev, type, mdeg);
2017
2018 return count;
2019 }

/*
 * idt_sysfs_reset_hist() - temp1_reset_history attribute write handler
 *
 * Reset the recorded lowest/highest temperatures so fresh extremes get
 * latched.
 */
2030 static ssize_t idt_sysfs_reset_hist(struct device *dev,
2031 struct device_attribute *da,
2032 const char *buf, size_t count)
2033 {
2034 struct idt_ntb_dev *ndev = dev_get_drvdata(dev);

/*
 * Set the lowest record to the maximum and the highest record to the
 * minimum so they are re-latched from the current reading.
 */
2039 idt_write_temp(ndev, IDT_TEMP_LOW, IDT_TEMP_MAX_MDEG);
2040 idt_write_temp(ndev, IDT_TEMP_HIGH, IDT_TEMP_MIN_MDEG);
2041
2042 return count;
2043 }
2044
2045
2046
2047
2048 static SENSOR_DEVICE_ATTR(temp1_input, 0444, idt_sysfs_show_temp, NULL,
2049 IDT_TEMP_CUR);
2050 static SENSOR_DEVICE_ATTR(temp1_lowest, 0444, idt_sysfs_show_temp, NULL,
2051 IDT_TEMP_LOW);
2052 static SENSOR_DEVICE_ATTR(temp1_highest, 0444, idt_sysfs_show_temp, NULL,
2053 IDT_TEMP_HIGH);
2054 static SENSOR_DEVICE_ATTR(temp1_offset, 0644, idt_sysfs_show_temp,
2055 idt_sysfs_set_temp, IDT_TEMP_OFFSET);
2056 static DEVICE_ATTR(temp1_reset_history, 0200, NULL, idt_sysfs_reset_hist);
2057
2058
2059
2060
2061 static struct attribute *idt_temp_attrs[] = {
2062 &sensor_dev_attr_temp1_input.dev_attr.attr,
2063 &sensor_dev_attr_temp1_lowest.dev_attr.attr,
2064 &sensor_dev_attr_temp1_highest.dev_attr.attr,
2065 &sensor_dev_attr_temp1_offset.dev_attr.attr,
2066 &dev_attr_temp1_reset_history.attr,
2067 NULL
2068 };
2069 ATTRIBUTE_GROUPS(idt_temp);

/*
 * idt_init_temp() - initialize the temperature sensor interface
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * Clear the temperature sensor control register, initialize the protecting
 * mutex and register the hwmon device exposing the sysfs attributes above.
 */
2080 static void idt_init_temp(struct idt_ntb_dev *ndev)
2081 {
2082 struct device *hwmon;
2083
2084
2085 idt_sw_write(ndev, IDT_SW_TMPCTL, 0x0);
2086
2087
2088 mutex_init(&ndev->hwmon_mtx);
2089
2090 hwmon = devm_hwmon_device_register_with_groups(&ndev->ntb.pdev->dev,
2091 ndev->swcfg->name, ndev, idt_temp_groups);
2092 if (IS_ERR(hwmon)) {
2093 dev_err(&ndev->ntb.pdev->dev, "Couldn't create hwmon device");
2094 return;
2095 }
2096
2097 dev_dbg(&ndev->ntb.pdev->dev, "Temperature HWmon interface registered");
2098 }

/*
 * NT-function interrupts
 *
 * A single MSI (or legacy INTx) vector is used. The threaded handler reads
 * the NT interrupt status and dispatches to the message, doorbell and
 * switch-event sub-handlers.
 */

/* Forward declaration of the threaded interrupt handler */
2117 static irqreturn_t idt_thread_isr(int irq, void *devid);

/*
 * idt_init_isr() - initialize the NT-function interrupts
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * Allocate a single interrupt vector, install the threaded handler and
 * unmask the supported NT interrupts.
 */
2125 static int idt_init_isr(struct idt_ntb_dev *ndev)
2126 {
2127 struct pci_dev *pdev = ndev->ntb.pdev;
2128 u32 ntint_mask;
2129 int ret;
2130
2131
2132 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
2133 if (ret != 1) {
2134 dev_err(&pdev->dev, "Failed to allocate IRQ vector");
2135 return ret;
2136 }
2137
2138
2139 ret = pci_irq_vector(pdev, 0);
2140 if (ret < 0) {
2141 dev_err(&pdev->dev, "Failed to get IRQ vector");
2142 goto err_free_vectors;
2143 }
2144
2145
2146 ret = devm_request_threaded_irq(&pdev->dev, ret, NULL, idt_thread_isr,
2147 IRQF_ONESHOT, NTB_IRQNAME, ndev);
2148 if (ret != 0) {
2149 dev_err(&pdev->dev, "Failed to set MSI IRQ handler, %d", ret);
2150 goto err_free_vectors;
2151 }
2152
2153
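/* Unmask the NT-function interrupts handled by this driver (message, doorbell and switch events) */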
2154 ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL;
2155 idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
2156
2157
2158 dev_dbg(&pdev->dev, "NTB interrupts initialized");
2159
2160 return 0;
2161
2162 err_free_vectors:
2163 pci_free_irq_vectors(pdev);
2164
2165 return ret;
2166 }

/*
 * idt_deinit_isr() - mask the NT interrupts and free the interrupt vector
 * @ndev:	IDT NTB hardware driver descriptor
 */
2174 static void idt_deinit_isr(struct idt_ntb_dev *ndev)
2175 {
2176 struct pci_dev *pdev = ndev->ntb.pdev;
2177 u32 ntint_mask;
2178
2179
2180 ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) | IDT_NTINTMSK_ALL;
2181 idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
2182
2183
2184 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 0), ndev);
2185
2186
2187 pci_free_irq_vectors(pdev);
2188
2189 dev_dbg(&pdev->dev, "NTB interrupts deinitialized");
2190 }

/*
 * idt_thread_isr() - threaded interrupt handler
 * @irq:	Interrupt vector number
 * @devid:	IDT NTB hardware driver descriptor
 *
 * Dispatch to the message, doorbell and switch-event sub-handlers according
 * to the NT interrupt status register.
 */
2201 static irqreturn_t idt_thread_isr(int irq, void *devid)
2202 {
2203 struct idt_ntb_dev *ndev = devid;
2204 bool handled = false;
2205 u32 ntint_sts;
2206
2207
2208 ntint_sts = idt_nt_read(ndev, IDT_NT_NTINTSTS);
2209
2210
2211 if (ntint_sts & IDT_NTINTSTS_MSG) {
2212 idt_msg_isr(ndev, ntint_sts);
2213 handled = true;
2214 }
2215
2216
2217 if (ntint_sts & IDT_NTINTSTS_DBELL) {
2218 idt_db_isr(ndev, ntint_sts);
2219 handled = true;
2220 }
2221
2222
2223 if (ntint_sts & IDT_NTINTSTS_SEVENT) {
2224 idt_se_isr(ndev, ntint_sts);
2225 handled = true;
2226 }
2227
2228 dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts);
2229
2230 return handled ? IRQ_HANDLED : IRQ_NONE;
2231 }

/* NTB API operations exposed by this driver */
2241 static const struct ntb_dev_ops idt_ntb_ops = {
2242 .port_number = idt_ntb_port_number,
2243 .peer_port_count = idt_ntb_peer_port_count,
2244 .peer_port_number = idt_ntb_peer_port_number,
2245 .peer_port_idx = idt_ntb_peer_port_idx,
2246 .link_is_up = idt_ntb_link_is_up,
2247 .link_enable = idt_ntb_link_enable,
2248 .link_disable = idt_ntb_link_disable,
2249 .mw_count = idt_ntb_mw_count,
2250 .mw_get_align = idt_ntb_mw_get_align,
2251 .peer_mw_count = idt_ntb_peer_mw_count,
2252 .peer_mw_get_addr = idt_ntb_peer_mw_get_addr,
2253 .peer_mw_set_trans = idt_ntb_peer_mw_set_trans,
2254 .peer_mw_clear_trans = idt_ntb_peer_mw_clear_trans,
2255 .db_valid_mask = idt_ntb_db_valid_mask,
2256 .db_read = idt_ntb_db_read,
2257 .db_clear = idt_ntb_db_clear,
2258 .db_read_mask = idt_ntb_db_read_mask,
2259 .db_set_mask = idt_ntb_db_set_mask,
2260 .db_clear_mask = idt_ntb_db_clear_mask,
2261 .peer_db_set = idt_ntb_peer_db_set,
2262 .msg_count = idt_ntb_msg_count,
2263 .msg_inbits = idt_ntb_msg_inbits,
2264 .msg_outbits = idt_ntb_msg_outbits,
2265 .msg_read_sts = idt_ntb_msg_read_sts,
2266 .msg_clear_sts = idt_ntb_msg_clear_sts,
2267 .msg_set_mask = idt_ntb_msg_set_mask,
2268 .msg_clear_mask = idt_ntb_msg_clear_mask,
2269 .msg_read = idt_ntb_msg_read,
2270 .peer_msg_write = idt_ntb_peer_msg_write
2271 };

/*
 * idt_register_device() - register the NTB device within the NTB subsystem
 * @ndev:	IDT NTB hardware driver descriptor
 */
2279 static int idt_register_device(struct idt_ntb_dev *ndev)
2280 {
2281 int ret;
2282
2283
2284 ndev->ntb.ops = &idt_ntb_ops;
2285 ndev->ntb.topo = NTB_TOPO_SWITCH;
2286
2287 ret = ntb_register_device(&ndev->ntb);
2288 if (ret != 0) {
2289 dev_err(&ndev->ntb.pdev->dev, "Failed to register NTB device");
2290 return ret;
2291 }
2292
2293 dev_dbg(&ndev->ntb.pdev->dev, "NTB device successfully registered");
2294
2295 return 0;
2296 }
2297
2298
2299
2300
2301
2302 static void idt_unregister_device(struct idt_ntb_dev *ndev)
2303 {
2304
2305 ntb_unregister_device(&ndev->ntb);
2306
2307 dev_dbg(&ndev->ntb.pdev->dev, "NTB device unregistered");
2308 }

/*
 * DebugFS interface: a read-only "info" node dumping the current device state
 */
2315 static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
2316 size_t count, loff_t *offp);
2317
2318
2319
2320
2321 static const struct file_operations idt_dbgfs_info_ops = {
2322 .owner = THIS_MODULE,
2323 .open = simple_open,
2324 .read = idt_dbgfs_info_read
2325 };

/*
 * idt_dbgfs_info_read() - DebugFS "info" node read handler
 *
 * Dump the local/peer ports, link state, NT mapping table, memory windows,
 * doorbell, message and temperature status into a text buffer.
 */
2334 static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
2335 size_t count, loff_t *offp)
2336 {
2337 struct idt_ntb_dev *ndev = filp->private_data;
2338 unsigned char idx, pidx, cnt;
2339 unsigned long irqflags, mdeg;
2340 ssize_t ret = 0, off = 0;
2341 enum ntb_speed speed;
2342 enum ntb_width width;
2343 char *strbuf;
2344 size_t size;
2345 u32 data;
2346
2347
2348 size = min_t(size_t, count, 0x1000U);
2349
2350
2351 strbuf = kmalloc(size, GFP_KERNEL);
2352 if (strbuf == NULL)
2353 return -ENOMEM;
2354
2355
2356 off += scnprintf(strbuf + off, size - off,
2357 "\n\t\tIDT NTB device Information:\n\n");
2358
2359
2360 off += scnprintf(strbuf + off, size - off,
2361 "Local Port %hhu, Partition %hhu\n", ndev->port, ndev->part);
2362
2363
2364 off += scnprintf(strbuf + off, size - off, "Peers:\n");
2365 for (idx = 0; idx < ndev->peer_cnt; idx++) {
2366 off += scnprintf(strbuf + off, size - off,
2367 "\t%hhu. Port %hhu, Partition %hhu\n",
2368 idx, ndev->peers[idx].port, ndev->peers[idx].part);
2369 }
2370
2371
2372 data = idt_ntb_link_is_up(&ndev->ntb, &speed, &width);
2373 off += scnprintf(strbuf + off, size - off,
2374 "NTB link status\t- 0x%08x, ", data);
2375 off += scnprintf(strbuf + off, size - off, "PCIe Gen %d x%d lanes\n",
2376 speed, width);
2377
2378
2379 off += scnprintf(strbuf + off, size - off, "NTB Mapping Table:\n");
2380 for (idx = 0; idx < IDT_MTBL_ENTRY_CNT; idx++) {
2381 spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
2382 idt_nt_write(ndev, IDT_NT_NTMTBLADDR, idx);
2383 data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
2384 spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
2385
2386
2387 if (data & IDT_NTMTBLDATA_VALID) {
2388 off += scnprintf(strbuf + off, size - off,
2389 "\t%hhu. Partition %d, Requester ID 0x%04x\n",
2390 idx, GET_FIELD(NTMTBLDATA_PART, data),
2391 GET_FIELD(NTMTBLDATA_REQID, data));
2392 }
2393 }
2394 off += scnprintf(strbuf + off, size - off, "\n");
2395
2396
2397 off += scnprintf(strbuf + off, size - off,
2398 "Outbound Memory Windows:\n");
2399 for (idx = 0; idx < ndev->mw_cnt; idx += cnt) {
2400 data = ndev->mws[idx].type;
2401 cnt = idt_get_mw_count(data);
2402
2403
2404 if (data == IDT_MW_DIR)
2405 off += scnprintf(strbuf + off, size - off,
2406 "\t%hhu.\t", idx);
2407 else
2408 off += scnprintf(strbuf + off, size - off,
2409 "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
2410
2411 off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ",
2412 idt_get_mw_name(data), ndev->mws[idx].bar);
2413
2414 off += scnprintf(strbuf + off, size - off,
2415 "Address align 0x%08llx, ", ndev->mws[idx].addr_align);
2416
2417 off += scnprintf(strbuf + off, size - off,
2418 "Size align 0x%08llx, Size max %llu\n",
2419 ndev->mws[idx].size_align, ndev->mws[idx].size_max);
2420 }
2421
2422
2423 for (pidx = 0; pidx < ndev->peer_cnt; pidx++) {
2424 off += scnprintf(strbuf + off, size - off,
2425 "Inbound Memory Windows for peer %hhu (Port %hhu):\n",
2426 pidx, ndev->peers[pidx].port);
2427
2428
2429 for (idx = 0; idx < ndev->peers[pidx].mw_cnt; idx += cnt) {
2430 data = ndev->peers[pidx].mws[idx].type;
2431 cnt = idt_get_mw_count(data);
2432
2433 if (data == IDT_MW_DIR)
2434 off += scnprintf(strbuf + off, size - off,
2435 "\t%hhu.\t", idx);
2436 else
2437 off += scnprintf(strbuf + off, size - off,
2438 "\t%hhu-%hhu.\t", idx, idx + cnt - 1);
2439
2440 off += scnprintf(strbuf + off, size - off,
2441 "%s BAR%hhu, ", idt_get_mw_name(data),
2442 ndev->peers[pidx].mws[idx].bar);
2443
2444 off += scnprintf(strbuf + off, size - off,
2445 "Address align 0x%08llx, ",
2446 ndev->peers[pidx].mws[idx].addr_align);
2447
2448 off += scnprintf(strbuf + off, size - off,
2449 "Size align 0x%08llx, Size max %llu\n",
2450 ndev->peers[pidx].mws[idx].size_align,
2451 ndev->peers[pidx].mws[idx].size_max);
2452 }
2453 }
2454 off += scnprintf(strbuf + off, size - off, "\n");
2455
2456
2457 data = idt_sw_read(ndev, IDT_SW_GDBELLSTS);
2458 off += scnprintf(strbuf + off, size - off,
2459 "Global Doorbell state\t- 0x%08x\n", data);
2460 data = idt_ntb_db_read(&ndev->ntb);
2461 off += scnprintf(strbuf + off, size - off,
2462 "Local Doorbell state\t- 0x%08x\n", data);
2463 data = idt_nt_read(ndev, IDT_NT_INDBELLMSK);
2464 off += scnprintf(strbuf + off, size - off,
2465 "Local Doorbell mask\t- 0x%08x\n", data);
2466 off += scnprintf(strbuf + off, size - off, "\n");
2467
2468
2469 off += scnprintf(strbuf + off, size - off,
2470 "Message event valid\t- 0x%08x\n", IDT_MSG_MASK);
2471 data = idt_ntb_msg_read_sts(&ndev->ntb);
2472 off += scnprintf(strbuf + off, size - off,
2473 "Message event status\t- 0x%08x\n", data);
2474 data = idt_nt_read(ndev, IDT_NT_MSGSTSMSK);
2475 off += scnprintf(strbuf + off, size - off,
2476 "Message event mask\t- 0x%08x\n", data);
2477 off += scnprintf(strbuf + off, size - off,
2478 "Message data:\n");
2479 for (idx = 0; idx < IDT_MSG_CNT; idx++) {
2480 int src;
2481 data = idt_ntb_msg_read(&ndev->ntb, &src, idx);
2482 off += scnprintf(strbuf + off, size - off,
2483 "\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
2484 idx, data, src, ndev->peers[src].port);
2485 }
2486 off += scnprintf(strbuf + off, size - off, "\n");
2487
2488
2489 idt_read_temp(ndev, IDT_TEMP_CUR, &mdeg);
2490 off += scnprintf(strbuf + off, size - off,
2491 "Switch temperature\t\t- %hhd.%hhuC\n",
2492 idt_get_deg(mdeg), idt_get_deg_frac(mdeg));
2493
2494
2495 ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off);
2496 kfree(strbuf);
2497
2498 return ret;
2499 }

/*
 * idt_init_dbgfs() - create the per-device DebugFS "info" node
 * @ndev:	IDT NTB hardware driver descriptor
 */
2507 static int idt_init_dbgfs(struct idt_ntb_dev *ndev)
2508 {
2509 char devname[64];
2510
2511
2512 if (IS_ERR_OR_NULL(dbgfs_topdir)) {
2513 dev_info(&ndev->ntb.pdev->dev, "Top DebugFS directory absent");
2514 return PTR_ERR(dbgfs_topdir);
2515 }
2516
2517
2518 snprintf(devname, 64, "info:%s", pci_name(ndev->ntb.pdev));
2519 ndev->dbgfs_info = debugfs_create_file(devname, 0400, dbgfs_topdir,
2520 ndev, &idt_dbgfs_info_ops);
2521 if (IS_ERR(ndev->dbgfs_info)) {
2522 dev_dbg(&ndev->ntb.pdev->dev, "Failed to create DebugFS node");
2523 return PTR_ERR(ndev->dbgfs_info);
2524 }
2525
2526 dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node created");
2527
2528 return 0;
2529 }
2530
2531
2532
2533
2534
2535
2536
2537 static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev)
2538 {
2539 debugfs_remove(ndev->dbgfs_info);
2540
2541 dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node discarded");
2542 }

/*
 * PCI bus initialization
 */

/*
 * idt_check_setup() - verify the pre-initialized BAR0 configuration
 * @pdev:	PCI device handle
 *
 * The driver requires BAR0 to be enabled, to map the NT-function
 * configuration space and to have the expected size; probing is aborted if
 * the device has not been pre-initialized that way.
 */
2556 static int idt_check_setup(struct pci_dev *pdev)
2557 {
2558 u32 data;
2559 int ret;
2560
2561
2562 ret = pci_read_config_dword(pdev, IDT_NT_BARSETUP0, &data);
2563 if (ret != 0) {
2564 dev_err(&pdev->dev,
2565 "Failed to read BARSETUP0 config register");
2566 return ret;
2567 }
2568
2569
2570 if (!(data & IDT_BARSETUP_EN) || !(data & IDT_BARSETUP_MODE_CFG)) {
2571 dev_err(&pdev->dev, "BAR0 doesn't map config space");
2572 return -EINVAL;
2573 }
2574
2575
2576 if ((data & IDT_BARSETUP_SIZE_MASK) != IDT_BARSETUP_SIZE_CFG) {
2577 dev_err(&pdev->dev, "Invalid size of config space");
2578 return -EINVAL;
2579 }
2580
2581 dev_dbg(&pdev->dev, "NTB device pre-initialized correctly");
2582
2583 return 0;
2584 }

/*
 * idt_create_dev() - allocate and pre-initialize the driver descriptor
 * @pdev:	PCI device handle
 * @id:		Matched PCI device ID entry carrying the switch configuration
 *
 * Return: the allocated descriptor or an ERR_PTR() value.
 */
2599 static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev,
2600 const struct pci_device_id *id)
2601 {
2602 struct idt_ntb_dev *ndev;
2603
2604
2605 ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL);
2606 if (!ndev) {
2607 dev_err(&pdev->dev, "Memory allocation failed for descriptor");
2608 return ERR_PTR(-ENOMEM);
2609 }
2610
2611
2612 ndev->swcfg = (struct idt_89hpes_cfg *)id->driver_data;
2613
2614 ndev->ntb.pdev = pdev;
2615
2616
2617 spin_lock_init(&ndev->db_mask_lock);
2618 spin_lock_init(&ndev->msg_mask_lock);
2619 spin_lock_init(&ndev->gasa_lock);
2620
2621 dev_info(&pdev->dev, "IDT %s discovered", ndev->swcfg->name);
2622
2623 dev_dbg(&pdev->dev, "NTB device descriptor created");
2624
2625 return ndev;
2626 }

/*
 * idt_init_pci() - initialize the NT-function PCIe interface
 * @ndev:	IDT NTB hardware driver descriptor
 *
 * Set the DMA masks, enable AER reporting, enable the device and bus
 * mastering, map BAR0 and store the driver data pointer.
 */
2637 static int idt_init_pci(struct idt_ntb_dev *ndev)
2638 {
2639 struct pci_dev *pdev = ndev->ntb.pdev;
2640 int ret;
2641
2642
2643 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2644 if (ret != 0) {
2645 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2646 if (ret != 0) {
2647 dev_err(&pdev->dev, "Failed to set DMA bit mask\n");
2648 return ret;
2649 }
2650 dev_warn(&pdev->dev, "Cannot set DMA highmem bit mask\n");
2651 }
2652 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2653 if (ret != 0) {
2654 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2655 if (ret != 0) {
2656 dev_err(&pdev->dev,
2657 "Failed to set consistent DMA bit mask\n");
2658 return ret;
2659 }
2660 dev_warn(&pdev->dev,
2661 "Cannot set consistent DMA highmem bit mask\n");
2662 }
2663 ret = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
2664 dma_get_mask(&pdev->dev));
2665 if (ret != 0) {
2666 dev_err(&pdev->dev, "Failed to set NTB device DMA bit mask\n");
2667 return ret;
2668 }
2669
2670
2671
2672
2673
2674 ret = pci_enable_pcie_error_reporting(pdev);
2675 if (ret != 0)
2676 dev_warn(&pdev->dev, "PCIe AER capability disabled\n");
2677 else
2678 pci_cleanup_aer_uncorrect_error_status(pdev);
2679
2680
2681 ret = pcim_enable_device(pdev);
2682 if (ret != 0) {
2683 dev_err(&pdev->dev, "Failed to enable PCIe device\n");
2684 goto err_disable_aer;
2685 }
2686
2687
2688
2689
2690
2691 pci_set_master(pdev);
2692
2693
2694 ret = pcim_iomap_regions_request_all(pdev, 1, NTB_NAME);
2695 if (ret != 0) {
2696 dev_err(&pdev->dev, "Failed to request resources\n");
2697 goto err_clear_master;
2698 }
2699
2700
2701 ndev->cfgspc = pcim_iomap_table(pdev)[0];
2702
2703
2704 pci_set_drvdata(pdev, ndev);
2705
2706 dev_dbg(&pdev->dev, "NT-function PCIe interface initialized");
2707
2708 return 0;
2709
2710 err_clear_master:
2711 pci_clear_master(pdev);
2712 err_disable_aer:
2713 (void)pci_disable_pcie_error_reporting(pdev);
2714
2715 return ret;
2716 }

/*
 * idt_deinit_pci() - deinitialize the NT-function PCIe interface
 * @ndev:	IDT NTB hardware driver descriptor
 */
2724 static void idt_deinit_pci(struct idt_ntb_dev *ndev)
2725 {
2726 struct pci_dev *pdev = ndev->ntb.pdev;
2727
2728
2729 pci_set_drvdata(pdev, NULL);
2730
2731
2732 pci_clear_master(pdev);
2733
2734
2735 (void)pci_disable_pcie_error_reporting(pdev);
2736
2737 dev_dbg(&pdev->dev, "NT-function PCIe interface cleared");
2738 }

/*
 * idt_pci_probe() - PCI device probe callback
 * @pdev:	PCI device handle
 * @id:		Matched PCI device ID entry
 *
 * Check the BAR0 setup, create the driver descriptor and bring up the PCIe
 * interface, ports, link events, memory windows, messaging, temperature
 * sensor, interrupts, NTB device registration and DebugFS node in order.
 */
2752 static int idt_pci_probe(struct pci_dev *pdev,
2753 const struct pci_device_id *id)
2754 {
2755 struct idt_ntb_dev *ndev;
2756 int ret;
2757
2758
2759 ret = idt_check_setup(pdev);
2760 if (ret != 0)
2761 return ret;
2762
2763
2764 ndev = idt_create_dev(pdev, id);
2765 if (IS_ERR_OR_NULL(ndev))
2766 return PTR_ERR(ndev);
2767
2768
2769 ret = idt_init_pci(ndev);
2770 if (ret != 0)
2771 return ret;
2772
2773
2774 (void)idt_scan_ports(ndev);
2775
2776
2777 idt_init_link(ndev);
2778
2779
2780 ret = idt_init_mws(ndev);
2781 if (ret != 0)
2782 goto err_deinit_link;
2783
2784
2785 idt_init_msg(ndev);
2786
2787
2788 idt_init_temp(ndev);
2789
2790
2791 ret = idt_init_isr(ndev);
2792 if (ret != 0)
2793 goto err_deinit_link;
2794
2795
2796 ret = idt_register_device(ndev);
2797 if (ret != 0)
2798 goto err_deinit_isr;
2799
2800
2801 (void)idt_init_dbgfs(ndev);
2802
2803
2804 dev_info(&pdev->dev, "IDT NTB device is ready");
2805
2806
2807 return 0;
2808
2809 err_deinit_isr:
2810 idt_deinit_isr(ndev);
2811 err_deinit_link:
2812 idt_deinit_link(ndev);
2813 idt_deinit_pci(ndev);
2814
2815 return ret;
2816 }

/*
 * idt_pci_remove() - PCI device remove callback, undoing idt_pci_probe()
 * @pdev:	PCI device handle
 */
2822 static void idt_pci_remove(struct pci_dev *pdev)
2823 {
2824 struct idt_ntb_dev *ndev = pci_get_drvdata(pdev);
2825
2826
2827 idt_deinit_dbgfs(ndev);
2828
2829
2830 idt_unregister_device(ndev);
2831
2832
2833 idt_deinit_isr(ndev);
2834
2835
2836 idt_deinit_link(ndev);
2837
2838
2839 idt_deinit_pci(ndev);
2840
2841
2842 dev_info(&pdev->dev, "IDT NTB device is removed");
2843
2844
2845 }

/*
 * Configurations of the supported IDT 89HPESxx PCIe-switches: device name
 * and the list of ports which can carry an NT function.
 */
2850 static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
2851 .name = "89HPES24NT6AG2",
2852 .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
2853 };
2854 static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
2855 .name = "89HPES32NT8AG2",
2856 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2857 };
2858 static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
2859 .name = "89HPES32NT8BG2",
2860 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2861 };
2862 static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
2863 .name = "89HPES12NT12G2",
2864 .port_cnt = 3, .ports = {0, 8, 16}
2865 };
2866 static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
2867 .name = "89HPES16NT16G2",
2868 .port_cnt = 4, .ports = {0, 8, 12, 16}
2869 };
2870 static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
2871 .name = "89HPES24NT24G2",
2872 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2873 };
2874 static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
2875 .name = "89HPES32NT24AG2",
2876 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2877 };
2878 static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
2879 .name = "89HPES32NT24BG2",
2880 .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
2881 };

/* PCI device IDs of the supported IDT PCIe-switches */
2886 static const struct pci_device_id idt_pci_tbl[] = {
2887 {IDT_PCI_DEVICE_IDS(89HPES24NT6AG2, idt_89hpes24nt6ag2_config)},
2888 {IDT_PCI_DEVICE_IDS(89HPES32NT8AG2, idt_89hpes32nt8ag2_config)},
2889 {IDT_PCI_DEVICE_IDS(89HPES32NT8BG2, idt_89hpes32nt8bg2_config)},
2890 {IDT_PCI_DEVICE_IDS(89HPES12NT12G2, idt_89hpes12nt12g2_config)},
2891 {IDT_PCI_DEVICE_IDS(89HPES16NT16G2, idt_89hpes16nt16g2_config)},
2892 {IDT_PCI_DEVICE_IDS(89HPES24NT24G2, idt_89hpes24nt24g2_config)},
2893 {IDT_PCI_DEVICE_IDS(89HPES32NT24AG2, idt_89hpes32nt24ag2_config)},
2894 {IDT_PCI_DEVICE_IDS(89HPES32NT24BG2, idt_89hpes32nt24bg2_config)},
2895 {0}
2896 };
2897 MODULE_DEVICE_TABLE(pci, idt_pci_tbl);

/* IDT PCIe-switch NT-function PCI driver structure */
2902 static struct pci_driver idt_pci_driver = {
2903 .name = KBUILD_MODNAME,
2904 .probe = idt_pci_probe,
2905 .remove = idt_pci_remove,
2906 .id_table = idt_pci_tbl,
2907 };
2908
2909 static int __init idt_pci_driver_init(void)
2910 {
2911 pr_info("%s %s\n", NTB_DESC, NTB_VER);
2912
2913
2914 if (debugfs_initialized())
2915 dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2916
2917
2918 return pci_register_driver(&idt_pci_driver);
2919 }
2920 module_init(idt_pci_driver_init);
2921
2922 static void __exit idt_pci_driver_exit(void)
2923 {
2924
2925 pci_unregister_driver(&idt_pci_driver);
2926
2927
2928 debugfs_remove_recursive(dbgfs_topdir);
2929 }
2930 module_exit(idt_pci_driver_exit);
2931