This source file includes the following definitions.
- net_busy_loop_on (plus a !CONFIG_NET_RX_BUSY_POLL stub)
- sk_can_busy_loop (plus a !CONFIG_NET_RX_BUSY_POLL stub)
- busy_loop_current_time
- busy_loop_timeout
- sk_busy_loop_timeout
- sk_busy_loop
- skb_mark_napi_id
- sk_mark_napi_id
- sk_mark_napi_id_once

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * net busy poll support
 * Copyright(c) 2013 Intel Corporation.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */

#ifndef _LINUX_NET_BUSY_POLL_H
#define _LINUX_NET_BUSY_POLL_H

#include <linux/netdevice.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>

/*		0 - Reserved for 'classic' napi
 *		1..NR_CPUS+1  - reserved for sender_cpu
 *		NR_CPUS+1..~0 - region available for NAPI operations
 */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

#ifdef CONFIG_NET_RX_BUSY_POLL

struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;

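/* true when the global busy-poll sysctl (net.core.busy_poll) is set */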
static inline bool net_busy_loop_on(void)
{
	return sysctl_net_busy_poll;
}

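/*
 * a socket may busy-poll only when it has a per-socket budget
 * (sk->sk_ll_usec, set via SO_BUSY_POLL or the busy_read sysctl)
 * and the caller has no signal pending
 */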
static inline bool sk_can_busy_loop(const struct sock *sk)
{
	return sk->sk_ll_usec && !signal_pending(current);
}

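/*
 * loop_end callback and the core busy-poll loop, implemented in
 * net/core/sock.c and net/core/dev.c respectively
 */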
bool sk_busy_loop_end(void *p, unsigned long start_time);

void napi_busy_loop(unsigned int napi_id,
		    bool (*loop_end)(void *, unsigned long),
		    void *loop_end_arg);

#else /* CONFIG_NET_RX_BUSY_POLL */
static inline bool net_busy_loop_on(void)
{
	return false;
}

static inline bool sk_can_busy_loop(struct sock *sk)
{
	return false;
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

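/*
 * current time in ~usec: local_clock() returns ns, and >> 10
 * divides by 1024, close enough to a divide by 1000 here
 */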
static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return (unsigned long)(local_clock() >> 10);
#else
	return 0;
#endif
}

/* in poll/select we use the global sysctl_net_busy_poll value */
static inline bool busy_loop_timeout(unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

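/* as busy_loop_timeout(), but against the per-socket sk_ll_usec budget */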
static inline bool sk_busy_loop_timeout(struct sock *sk,
					unsigned long start_time)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	}
#endif
	return true;
}

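/*
 * busy-poll the NAPI context recorded in sk->sk_napi_id;
 * nonblocking callers pass a NULL loop_end and poll only once
 */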
static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
#endif
}

/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb->napi_id = napi->napi_id;
#endif
}

/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
	sk_rx_queue_set(sk, skb);
}

/* variant used for unconnected sockets */
static inline void sk_mark_napi_id_once(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!READ_ONCE(sk->sk_napi_id))
		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
}

#endif /* _LINUX_NET_BUSY_POLL_H */
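
To make the flow concrete, here is a minimal, hypothetical sketch of how a driver's NAPI poll, a protocol receive handler, and a blocking read path might use these helpers together; my_rx_one(), my_enqueue() and the my_* functions are illustrative stand-ins, not functions from this header or the kernel tree.

#include <net/busy_poll.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>

struct sk_buff *my_rx_one(void);			/* hypothetical: pull one frame off the RX ring */
void my_enqueue(struct sock *sk, struct sk_buff *skb);	/* hypothetical: queue the skb to the socket */

/* driver NAPI poll: tag every received skb with this NAPI's id */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = my_rx_one()) != NULL) {
		skb_mark_napi_id(skb, napi);	/* record where the packet arrived */
		netif_receive_skb(skb);
		done++;
	}
	return done;
}

/* protocol receive: copy the id from the skb onto the socket */
static void my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	sk_mark_napi_id(sk, skb);	/* sk_mark_napi_id_once() for unconnected sockets */
	my_enqueue(sk, skb);
}

/* blocking read path: spin on the NIC queue before going to sleep */
static void my_wait_for_data(struct sock *sk, int nonblock)
{
	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, nonblock);	/* polls the NAPI id recorded above */
	/* fall back to the normal wait queue if there is still no data */
}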