This source file includes the following definitions:
- default_operstate
- rfc2863_policy
- linkwatch_init_dev
- linkwatch_urgent_event
- linkwatch_add_event
- linkwatch_schedule_work
- linkwatch_do_dev
- __linkwatch_run_queue
- linkwatch_forget_dev
- linkwatch_run_queue
- linkwatch_event
- linkwatch_fire_event
1
2
3
4
5
6
7
8
9 #include <linux/module.h>
10 #include <linux/netdevice.h>
11 #include <linux/if.h>
12 #include <net/sock.h>
13 #include <net/pkt_sched.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/jiffies.h>
16 #include <linux/spinlock.h>
17 #include <linux/workqueue.h>
18 #include <linux/bitops.h>
19 #include <linux/types.h>
20
21
enum lw_bits {
	LW_URGENT = 0,		/* an urgent (immediate) run of the work is pending */
};

static unsigned long linkwatch_flags;		/* LW_* bit flags */
static unsigned long linkwatch_nextevent;	/* jiffies before which non-urgent runs are deferred */

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

/* Devices with a pending link event; protected by lweventlist_lock. */
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
34
35 static unsigned char default_operstate(const struct net_device *dev)
36 {
37 if (!netif_carrier_ok(dev))
38 return (dev->ifindex != dev_get_iflink(dev) ?
39 IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);
40
41 if (netif_dormant(dev))
42 return IF_OPER_DORMANT;
43
44 return IF_OPER_UP;
45 }
46
47
48 static void rfc2863_policy(struct net_device *dev)
49 {
50 unsigned char operstate = default_operstate(dev);
51
52 if (operstate == dev->operstate)
53 return;
54
55 write_lock_bh(&dev_base_lock);
56
57 switch(dev->link_mode) {
58 case IF_LINK_MODE_DORMANT:
59 if (operstate == IF_OPER_UP)
60 operstate = IF_OPER_DORMANT;
61 break;
62
63 case IF_LINK_MODE_DEFAULT:
64 default:
65 break;
66 }
67
68 dev->operstate = operstate;
69
70 write_unlock_bh(&dev_base_lock);
71 }
72
73
74 void linkwatch_init_dev(struct net_device *dev)
75 {
76
77 if (!netif_carrier_ok(dev) || netif_dormant(dev))
78 rfc2863_policy(dev);
79 }
80
81
82 static bool linkwatch_urgent_event(struct net_device *dev)
83 {
84 if (!netif_running(dev))
85 return false;
86
87 if (dev->ifindex != dev_get_iflink(dev))
88 return true;
89
90 if (netif_is_lag_port(dev) || netif_is_lag_master(dev))
91 return true;
92
93 return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
94 }
95
96
/*
 * Queue @dev on the pending-event list if it is not already there.
 * Takes a device reference that is dropped in linkwatch_do_dev().
 * Safe from any context: the list lock is irq-saving.
 */
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	/* An empty link_watch_list node means "not queued yet". */
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev_hold(dev);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}
108
109
/*
 * (Re)schedule the link-watch work.  Urgent requests run immediately;
 * non-urgent requests wait until linkwatch_nextevent (at most ~1s).
 */
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	/* An urgent run is already queued; nothing can run sooner. */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop the delay for urgent events. */
	if (urgent) {
		/* Lost the race to another urgent scheduler — done. */
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If jiffies wrapped past nextevent, delay is huge; run now. */
	if (delay > HZ)
		delay = 0;

	/*
	 * If urgent, force immediate execution (mod_delayed_work pulls
	 * in an already-pending timer); otherwise don't override an
	 * existing timer — schedule_delayed_work is a no-op if queued.
	 */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		mod_delayed_work(system_wq, &linkwatch_work, 0);
	else
		schedule_delayed_work(&linkwatch_work, delay);
}
137
138
/*
 * Handle one dequeued device: apply RFC 2863 policy and, if the device
 * is up and present, (de)activate its queues to match carrier state and
 * notify userspace.  Drops the reference taken in linkwatch_add_event().
 */
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Order prior accesses before clearing the pending bit below;
	 * once the bit is clear, new events may be accepted and the
	 * device state rewritten concurrently.
	 */
	smp_mb__before_atomic();

	/* We are about to handle this device, so from here on a new
	 * link event may set the bit and re-queue the device. */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP && netif_device_present(dev)) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		/* Tell userspace (rtnetlink) about the state change. */
		netdev_state_change(dev);
	}
	dev_put(dev);	/* pairs with dev_hold() in linkwatch_add_event() */
}
163
/*
 * Drain the pending-event list, processing at most a bounded number of
 * devices per invocation.  With @urgent_only set, non-urgent devices
 * are pushed back onto the list for a later full run.  Caller must
 * hold RTNL (linkwatch_do_dev changes device state).
 */
static void __linkwatch_run_queue(int urgent_only)
{
#define MAX_DO_DEV_PER_LOOP	100

	int do_dev = MAX_DO_DEV_PER_LOOP;
	struct net_device *dev;
	LIST_HEAD(wrk);

	/* Double the budget for urgent-only runs — NOTE(review):
	 * presumably to offset the non-urgent entries that are skipped
	 * and re-queued below; confirm against commit history. */
	if (urgent_only)
		do_dev += MAX_DO_DEV_PER_LOOP;

	/*
	 * Rate-limit full runs to one per second so a flapping driver
	 * cannot storm userspace with netlink messages; urgent events
	 * are exempt.  On an urgent run, clamp nextevent so a wrapped
	 * or stale value cannot defer the next full run too far.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit the effect of a jiffies wrap-around on the delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	/* Grab the whole list privately so new events can keep queueing. */
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk) && do_dev > 0) {

		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		/* Non-urgent events are deferred back to the global list. */
		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		/* Per-device work may sleep/take locks: drop the
		 * irq-disabling spinlock around it. */
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		do_dev--;
		spin_lock_irq(&lweventlist_lock);
	}

	/* Budget exhausted: return unprocessed devices to the list. */
	list_splice_init(&wrk, &lweventlist);

	/* Anything left (deferred or over-budget) needs another run. */
	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}
216
217 void linkwatch_forget_dev(struct net_device *dev)
218 {
219 unsigned long flags;
220 int clean = 0;
221
222 spin_lock_irqsave(&lweventlist_lock, flags);
223 if (!list_empty(&dev->link_watch_list)) {
224 list_del_init(&dev->link_watch_list);
225 clean = 1;
226 }
227 spin_unlock_irqrestore(&lweventlist_lock, flags);
228 if (clean)
229 linkwatch_do_dev(dev);
230 }
231
232
233
/* Run a full (non-urgent-only) pass over the event queue.
 * Must be called with RTNL held. */
void linkwatch_run_queue(void)
{
	__linkwatch_run_queue(0);
}
238
239
/*
 * Delayed-work handler: drain the queue under RTNL.  While the
 * rate-limit window is still open (nextevent in the future), process
 * urgent events only; otherwise do a full run.
 */
static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}
246
247
/*
 * Report a link event on @dev.  The PENDING bit ensures the device is
 * queued at most once; the work is (re)scheduled, immediately for
 * urgent events.  Safe from any context.
 */
void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	/* First event since last processing: queue the device. */
	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;	/* already queued and scheduled; nothing to speed up */

	/* Queued (or already queued but now urgent): ensure the work
	 * runs soon enough for this event's urgency. */
	linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);