This source file includes the following definitions:
- xenvif_to_xenbus_device
- nr_pending_reqs

#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
        struct xen_netif_tx_request req; /* tx request */
        unsigned int extra_count;
        /* Callback data for released SKBs. The callback is always
         * xenvif_zerocopy_callback() (declared below); its desc carries
         * the pending_idx, which is also an index into this
         * pending_tx_info array.
         */
        struct ubuf_info callback_struct;
};

#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

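/* Metadata for one slot of a packet being built on the guest RX ring. */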
struct xenvif_rx_meta {
        int id;
        int size;
        int gso_type;
        int gso_size;
};

#define GSO_BIT(type) \
        (1 << XEN_NETIF_GSO_TYPE_ ## type)

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

#define NETBACK_INVALID_HANDLE -1

/* To avoid confusion, XEN_NETBK_LEGACY_SLOTS_MAX indicates the maximum
 * number of slots a valid packet can use. It is currently defined to be
 * XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by both the
 * frontend and the backend.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

struct xenvif;

struct xenvif_stats {
        /* Stats fields to be updated per-queue.
         * A subset of struct net_device_stats that contains only the
         * fields that are updated in netback.c for each queue.
         */
        u64 rx_bytes;
        u64 rx_packets;
        u64 tx_bytes;
        u64 tx_packets;

        /* Additional stats used by xenvif */
        unsigned long rx_gso_checksum_fixup;
        unsigned long tx_zerocopy_sent;
        unsigned long tx_zerocopy_success;
        unsigned long tx_zerocopy_fail;
        unsigned long tx_frag_overflow;
};

#define COPY_BATCH_SIZE 64

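/* Batch of grant-copy operations for the guest RX path; once a batch
 * completes, fully copied skbs are moved onto the *completed queue.
 */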
struct xenvif_copy_state {
        struct gnttab_copy op[COPY_BATCH_SIZE];
        RING_IDX idx[COPY_BATCH_SIZE];
        unsigned int num;
        struct sk_buff_head *completed;
};

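/* Per-queue data for xenvif */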
struct xenvif_queue {
        unsigned int id; /* Queue ID, 0-based */
        char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
        struct xenvif *vif; /* Parent VIF */

        /* Use NAPI for guest TX */
        struct napi_struct napi;
        /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
        unsigned int tx_irq;
        /* Only used when feature-split-event-channels = 1 */
        char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
        struct xen_netif_tx_back_ring tx;
        struct sk_buff_head tx_queue;
        struct page *mmap_pages[MAX_PENDING_REQS];
        pending_ring_idx_t pending_prod;
        pending_ring_idx_t pending_cons;
        u16 pending_ring[MAX_PENDING_REQS];
        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
        grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

        struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
        struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
        struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
        /* passed to gnttab_[un]map_refs with pages under (un)mapping */
        struct page *pages_to_map[MAX_PENDING_REQS];
        struct page *pages_to_unmap[MAX_PENDING_REQS];

        /* This prevents zerocopy callbacks from racing over dealloc_ring */
        spinlock_t callback_lock;
        /* This prevents the dealloc thread and NAPI instance from racing
         * over response creation and pending_ring in xenvif_idx_release.
         * In xenvif_tx_err it only protects response creation.
         */
        spinlock_t response_lock;
        pending_ring_idx_t dealloc_prod;
        pending_ring_idx_t dealloc_cons;
        u16 dealloc_ring[MAX_PENDING_REQS];
        struct task_struct *dealloc_task;
        wait_queue_head_t dealloc_wq;
        atomic_t inflight_packets;

        /* Use kthread for guest RX */
        struct task_struct *task;
        wait_queue_head_t wq;
        /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
        unsigned int rx_irq;
        /* Only used when feature-split-event-channels = 1 */
        char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
        struct xen_netif_rx_back_ring rx;
        struct sk_buff_head rx_queue;

        unsigned int rx_queue_max;
        unsigned int rx_queue_len;
        unsigned long last_rx_time;
        bool stalled;

        struct xenvif_copy_state rx_copy;

        /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
        unsigned long credit_bytes;
        unsigned long credit_usec;
        unsigned long remaining_credit;
        struct timer_list credit_timeout;
        u64 credit_window_start;
        bool rate_limited;

        /* Statistics */
        struct xenvif_stats stats;
};

enum state_bit_shift {
        /* This bit marks that the vif is connected */
        VIF_STATUS_CONNECTED,
};

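/* An entry on the VIF's multicast filter list, freed via RCU. */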
struct xenvif_mcast_addr {
        struct list_head entry;
        struct rcu_head rcu;
        u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40

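/* A cached hash result: val is the hash of the first len bytes of tag. */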
struct xenvif_hash_cache_entry {
        struct list_head link;
        struct rcu_head rcu;
        u8 tag[XEN_NETBK_HASH_TAG_SIZE];
        unsigned int len;
        u32 val;
        int seq;
};

struct xenvif_hash_cache {
        spinlock_t lock;
        struct list_head list;
        unsigned int count;
        atomic_t seq;
};

struct xenvif_hash {
        unsigned int alg;
        u32 flags;
        bool mapping_sel;
        u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
        u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
        unsigned int size;
        struct xenvif_hash_cache cache;
};

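/* Ties the xenbus device to its VIF and tracks the xenbus state of both
 * ends of the connection.
 */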
struct backend_info {
        struct xenbus_device *dev;
        struct xenvif *vif;

        /* This is the state that will be reflected in xenstore when any
         * active hotplug script completes.
         */
        enum xenbus_state state;

        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;

        const char *hotplug_script;
};

struct xenvif {
        /* Unique identifier for this interface. */
        domid_t domid;
        unsigned int handle;

        u8 fe_dev_addr[6];
        struct list_head fe_mcast_addr;
        unsigned int fe_mcast_count;

        /* Frontend feature information. */
        int gso_mask;

        u8 can_sg:1;
        u8 ip_csum:1;
        u8 ipv6_csum:1;
        u8 multicast_control:1;

        /* Is this interface disabled? True when backend discovers
         * frontend is rogue.
         */
        bool disabled;
        unsigned long status;
        unsigned long drain_timeout;
        unsigned long stall_timeout;

        /* Queues */
        struct xenvif_queue *queues;
        unsigned int num_queues; /* active queues, resource allocated */
        unsigned int stalled_queues;

        struct xenvif_hash hash;

        struct xenbus_watch credit_watch;
        struct xenbus_watch mcast_ctrl_watch;

        struct backend_info *be;

        spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
        struct dentry *xenvif_dbg_root;
#endif

        struct xen_netif_ctrl_back_ring ctrl;
        unsigned int ctrl_irq;

        /* Miscellaneous private stuff. */
        struct net_device *dev;
};

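/* Per-skb private state on the guest RX path, kept in skb->cb and
 * accessed via XENVIF_RX_CB().
 */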
struct xenvif_rx_cb {
        unsigned long expires;
        int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

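/* The VIF's net_device is a child of the corresponding xenbus device. */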
static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
        return to_xenbus_device(vif->dev->dev.parent);
}

void xenvif_tx_credit_callback(struct timer_list *t);

struct xenvif *xenvif_alloc(struct device *parent,
                            domid_t domid,
                            unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
                        unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

int xenvif_schedulable(struct xenvif *vif);

int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
                                   grant_ref_t tx_ring_ref,
                                   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);

/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

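/* pending_prod - pending_cons counts the free slots on the pending ring,
 * so the number of requests in flight is the ring size minus that.
 */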
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
        return MAX_PENDING_REQS -
                queue->pending_prod + queue->pending_cons;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif

#endif /* __XEN_NETBACK__COMMON_H__ */