/*
 * include/linux/if_team.h - Network team device driver header
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_

#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <linux/types.h>
#include <uapi/linux/if_team.h>

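/*
 * Per-CPU statistics; syncp guards consistent reads of the 64-bit
 * counters on 32-bit hosts.
 */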
struct team_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_dropped;
	u32			tx_dropped;
};

struct team;

struct team_port {
	struct net_device *dev;
	struct hlist_node hlist; /* node in enabled ports hash list */
	struct list_head list; /* node in ordinary list */
	struct team *team;
	int index; /* index of enabled port. If disabled, it's set to -1 */

	bool linkup; /* either state.linkup or user.linkup */

	struct {
		bool linkup;
		u32 speed;
		u8 duplex;
	} state;

	/* Values set by userspace */
	struct {
		bool linkup;
		bool linkup_enabled;
	} user;

	/* Custom gennetlink interface related flags */
	bool changed;
	bool removed;

	/*
	 * A place for storing original values of the device before it
	 * became a port.
	 */
	struct {
		unsigned char dev_addr[MAX_ADDR_LEN];
		unsigned int mtu;
	} orig;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *np;
#endif

	s32 priority; /* lower number ~ higher priority */
	u16 queue_id;
	struct list_head qom_list; /* node in queue override mapping list */
	struct rcu_head	rcu;
	long mode_priv[0];
};

static inline bool team_port_enabled(struct team_port *port)
{
	return port->index != -1;
}

static inline bool team_port_txable(struct team_port *port)
{
	return port->linkup && team_port_enabled(port);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	struct netpoll *np = port->np;

	if (np)
		netpoll_send_skb(np, skb);
}
#else
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif

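/*
 * Hooks implemented by a team mode (e.g. round-robin, active-backup).
 * The team core calls these on rx/tx and on port state changes.
 */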
struct team_mode_ops {
	int (*init)(struct team *team);
	void (*exit)(struct team *team);
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_enabled)(struct team *team, struct team_port *port);
	void (*port_disabled)(struct team *team, struct team_port *port);
};

extern int team_modeop_port_enter(struct team *team, struct team_port *port);
extern void team_modeop_port_change_dev_addr(struct team *team,
					     struct team_port *port);

enum team_option_type {
	TEAM_OPTION_TYPE_U32,
	TEAM_OPTION_TYPE_STRING,
	TEAM_OPTION_TYPE_BINARY,
	TEAM_OPTION_TYPE_BOOL,
	TEAM_OPTION_TYPE_S32,
};

struct team_option_inst_info {
	u32 array_index;
	struct team_port *port; /* != NULL if per-port */
};

struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info;
};

struct team_option {
	struct list_head list;
	const char *name;
	bool per_port;
	unsigned int array_size; /* != 0 means the option is an array */
	enum team_option_type type;
	int (*init)(struct team *team, struct team_option_inst_info *info);
	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};

extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
extern void team_options_change_check(struct team *team);

struct team_mode {
	const char *kind;
	struct module *owner;
	size_t priv_size;
	size_t port_priv_size;
	const struct team_mode_ops *ops;
};

#define TEAM_PORT_HASHBITS 4
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)

#define TEAM_MODE_PRIV_LONGS 4
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)

struct team {
	struct net_device *dev; /* associated netdevice */
	struct team_pcpu_stats __percpu *pcpu_stats;

	struct mutex lock; /* used for overall locking, e.g. port lists write */

	/*
	 * List of enabled ports and their count
	 */
	int en_port_count;
	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];

	struct list_head port_list; /* list of all ports */

	struct list_head option_list;
	struct list_head option_inst_list; /* list of option instances */

	const struct team_mode *mode;
	struct team_mode_ops ops;
	bool user_carrier_enabled;
	bool queue_override_enabled;
	struct list_head *qom_lists; /* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	long mode_priv[TEAM_MODE_PRIV_LONGS];
};

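/*
 * Transmit an skb through the given port: restore the queue mapping stashed
 * in qdisc_skb_cb(skb)->slave_dev_queue_mapping, point the skb at the port's
 * netdevice, and transmit via dev_queue_xmit() (or via netpoll when netpoll
 * tx is running on the team device).
 */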
static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	skb->dev = port->dev;
	if (unlikely(netpoll_tx_running(team->dev))) {
		team_netpoll_send_skb(port, skb);
		return 0;
	}
	return dev_queue_xmit(skb);
}

static inline struct hlist_head *team_port_index_hash(struct team *team,
						      int port_index)
{
	return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
}

static inline struct team_port *team_get_port_by_index(struct team *team,
						       int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

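/*
 * Map an arbitrary number (e.g. a flow hash or a round-robin counter) onto
 * the range of currently enabled ports.
 */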
static inline int team_num_to_port_index(struct team *team, int num)
{
	int en_port_count = ACCESS_ONCE(team->en_port_count);

	if (unlikely(!en_port_count))
		return 0;
	return num % en_port_count;
}

static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
							   int port_index)
{
	struct team_port *port;
	struct hlist_head *head = team_port_index_hash(team, port_index);

	hlist_for_each_entry_rcu(port, head, hlist)
		if (port->index == port_index)
			return port;
	return NULL;
}

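/*
 * Return @port if it can transmit; otherwise walk the port list (under RCU)
 * starting after @port, wrapping around, and return the first txable port.
 * Returns NULL if no port can transmit.
 */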
static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	if (likely(team_port_txable(port)))
		return port;
	cur = port;
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}

extern int team_options_register(struct team *team,
				 const struct team_option *option,
				 size_t option_count);
extern void team_options_unregister(struct team *team,
				    const struct team_option *option,
				    size_t option_count);
extern int team_mode_register(const struct team_mode *mode);
extern void team_mode_unregister(const struct team_mode *mode);
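
/*
 * A mode module typically fills a struct team_mode and registers it from its
 * module init/exit hooks. Minimal sketch (names below are hypothetical):
 *
 *	static const struct team_mode_ops my_mode_ops = {
 *		.transmit	= my_mode_transmit,
 *		.receive	= my_mode_receive,
 *	};
 *
 *	static const struct team_mode my_mode = {
 *		.kind		= "mymode",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct my_mode_priv),
 *		.ops		= &my_mode_ops,
 *	};
 *
 *	module init: return team_mode_register(&my_mode);
 *	module exit: team_mode_unregister(&my_mode);
 */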

#define TEAM_DEFAULT_NUM_TX_QUEUES 16
#define TEAM_DEFAULT_NUM_RX_QUEUES 16

#endif /* _LINUX_IF_TEAM_H_ */