This source file includes the following definitions:
- tb_phy_port_from_link
- tb_xdomain_find_by_uuid_locked
- tb_xdomain_find_by_route_locked
- tb_xdomain_get
- tb_xdomain_put
- tb_is_xdomain
- tb_to_xdomain
- tb_service_get
- tb_service_put
- tb_is_service
- tb_to_service
- tb_service_get_drvdata
- tb_service_set_drvdata
- tb_service_parent
- tb_ring_rx
- tb_ring_tx
- tb_ring_dma_device
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 */
#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *			 Thunderbolt dock (and Display Port)
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *	  tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
	struct device dev;
	struct mutex lock;
	struct tb_nhi *nhi;
	struct tb_ctl *ctl;
	struct workqueue_struct *wq;
	struct tb_switch *root_switch;
	const struct tb_cm_ops *cm_ops;
	int index;
	enum tb_security_level security_level;
	size_t nboot_acl;
	unsigned long privdata[0];
};

extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT	2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
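
/*
 * For illustration (not part of the original header): since each physical
 * port exposes TB_LINKS_PER_PHY_PORT (two) links numbered from 1, links 1
 * and 2 map to physical port 0, and links 3 and 4 map to physical port 1:
 *
 *	tb_phy_port_from_link(1);	// (1 - 1) / 2 == 0
 *	tb_phy_port_from_link(2);	// (2 - 1) / 2 == 0
 *	tb_phy_port_from_link(3);	// (3 - 1) / 2 == 1
 */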

/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * User needs to provide serialization if needed.
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};

enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE	8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always terminated)
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key,
				     enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
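
/*
 * Illustrative sketch (not part of the original header): a service could
 * publish its own property directory and walk it back with
 * tb_property_for_each(). The key "myproto" and the uuid_t variable
 * my_proto_uuid are made-up names used only for this example:
 *
 *	struct tb_property_dir *dir;
 *	struct tb_property *p;
 *	int ret;
 *
 *	dir = tb_property_create_dir(&my_proto_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_text(dir, "name", "example service");
 *
 *	ret = tb_register_property_dir("myproto", dir);
 *	if (ret) {
 *		tb_property_free_dir(dir);
 *		return ret;
 *	}
 *
 *	tb_property_for_each(dir, p)
 *		pr_info("property key %s type %d\n", p->key, p->type);
 */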

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (kmalloc'd)
 * @local_uuid: Cached local UUID
 * @route: Route string the other domain can be reached with
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @is_unplugged: The XDomain is unplugged
 * @resume: The XDomain is being resumed
 * @needs_uuid: The XDomain does not yet have the UUID of the remote host
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used for transmitting packets
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used for receiving packets
 * @service_ids: Used to generate IDs for the services
 * @properties: Properties exported by the remote domain
 * @property_block_gen: Generation of @properties
 * @get_uuid_work: Work used to retrieve UUID of the remote domain
 * @uuid_retries: Number of retries left when retrieving the UUID
 * @get_properties_work: Work used to get remote domain properties
 * @properties_retries: Number of retries left when retrieving properties
 * @properties_changed_work: Work used to notify the remote domain that our
 *			     properties have changed
 * @properties_changed_retries: Number of retries left when sending the
 *				properties changed notification
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain the remote domain is connected (ICM only)
 *
 * This structure represents a connection between two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	bool is_unplugged;
	bool resume;
	bool needs_uuid;
	u16 transmit_path;
	u16 transmit_ring;
	u16 receive_path;
	u16 receive_ring;
	struct ida service_ids;
	struct tb_property_dir *properties;
	u32 property_block_gen;
	struct delayed_work get_uuid_work;
	int uuid_retries;
	struct delayed_work get_properties_work;
	int properties_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	u8 link;
	u8 depth;
};

int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
	if (xd)
		get_device(&xd->dev);
	return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
	if (xd)
		put_device(&xd->dev);
}
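
/*
 * Illustrative sketch (not part of the original header): looking up a
 * remote domain by UUID and dropping the reference afterwards. This
 * assumes the find helpers return the XDomain with a device reference
 * held, so the caller pairs the lookup with tb_xdomain_put():
 *
 *	struct tb_xdomain *xd;
 *
 *	xd = tb_xdomain_find_by_uuid_locked(tb, uuid);
 *	if (xd) {
 *		// ... use xd->vendor, xd->device, etc. ...
 *		tb_xdomain_put(xd);
 *	}
 */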

static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
	if (tb_is_xdomain(dev))
		return container_of(dev, struct tb_xdomain, dev);
	return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);

/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. Only limitation is that the XDomain
 * discovery protocol UUID cannot be registered since it is handled by
 * the core driver.
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);

/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory of the service
 * @prtcid: Protocol ID from the properties directory of the service
 * @prtcvers: Protocol version from the properties directory of the service
 * @prtcrevs: Protocol software revision from the properties directory of
 *	      the service
 * @prtcstns: Protocol settings mask from the properties directory of the
 *	      service
 *
 * Each domain exposes the set of services it supports as a collection of
 * properties. For each service there is one corresponding &struct
 * tb_service, and service drivers are bound to these.
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
	if (svc)
		get_device(&svc->dev);
	return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
	if (svc)
		put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
	return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
	if (tb_is_service(dev))
		return container_of(dev, struct tb_service, dev);
	return NULL;
}

/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)				\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY |	\
		       TBSVC_MATCH_PROTOCOL_ID,		\
	.protocol_key = (key),				\
	.protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);
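
/*
 * Illustrative sketch (not part of the original header): a minimal service
 * driver binding to a made-up protocol key "network" with protocol ID 1.
 * The names my_ids, my_probe, my_remove and my_service_driver are
 * hypothetical and used only for this example:
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *	static struct tb_service_driver my_service_driver = {
 *		.driver = {
 *			.owner = THIS_MODULE,
 *			.name = "my-service",
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.id_table = my_ids,
 *	};
 *
 *	// in module init/exit:
 *	tb_register_service_driver(&my_service_driver);
 *	tb_unregister_service_driver(&my_service_driver);
 */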

static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
	return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
	dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
	return tb_to_xdomain(svc->dev.parent);
}

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupts when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	const struct tb_nhi_ops *ops;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered (if set).
 *		Disables interrupt and calls the callback.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x04,
	RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};

#define TB_FRAME_SIZE		0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy has to be set. The buffer must contain at least
 * %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof and @frame->sof set once the frame has been received.
 *
 * If ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy, @frame->size, @frame->eof and @frame->sof have to
 * be set.
 *
 * @frame->callback will be invoked with @frame->flags set once the frame
 * has been transmitted.
 *
 * If ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
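
/*
 * Illustrative sketch (not part of the original header): allocating a TX
 * ring, starting it and queueing a single frame. The hop number, ring size,
 * dma_addr, len and my_tx_callback are hypothetical values/names used only
 * for this example; the buffer is assumed to already be DMA mapped against
 * tb_ring_dma_device():
 *
 *	struct tb_ring *ring;
 *	struct ring_frame *frame;
 *	int ret;
 *
 *	ring = tb_ring_alloc_tx(nhi, hop, 16, RING_FLAG_NO_SUSPEND);
 *	if (!ring)
 *		return -ENOMEM;
 *	tb_ring_start(ring);
 *
 *	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
 *	if (!frame)
 *		goto err_stop;
 *	frame->buffer_phy = dma_addr;
 *	frame->callback = my_tx_callback;
 *	frame->size = len;
 *	ret = tb_ring_tx(ring, frame);
 *
 *	// frame must stay allocated until my_tx_callback() has run
 *
 *	tb_ring_stop(ring);
 *	tb_ring_free(ring);
 */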

/* Used with rings allocated with a @start_poll callback (polling mode) */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
	return &ring->nhi->pdev->dev;
}
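
/*
 * Illustrative sketch (not part of the original header): mapping a receive
 * buffer against the correct DMA device before queueing it on a ring. The
 * variables buf and addr are hypothetical:
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_single(tb_ring_dma_device(ring), buf, TB_FRAME_SIZE,
 *			      DMA_FROM_DEVICE);
 *	if (dma_mapping_error(tb_ring_dma_device(ring), addr))
 *		return -ENOMEM;
 */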

#endif