/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push space for the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
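
/* Example usage (sketch only, modelled on the broadcast path further down in
 * this file): the caller owns or clones the skb and supplies the outgoing
 * hard interface together with the destination mac address, e.g.
 *
 *	skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
 *	if (skb1)
 *		batadv_send_skb_packet(skb1, hard_iface,
 *				       batadv_broadcast_addr);
 */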

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases the neigh_node refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Push the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
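
/* Example usage (sketch only; BATADV_P_DATA is one of the unicast 4addr
 * subtypes declared in packet.h, actual callers may differ):
 *
 *	if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
 *						   BATADV_P_DATA))
 *		goto err;
 */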

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}
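
/* Callers normally go through the thin wrappers in send.h, e.g.
 * batadv_send_skb_via_tt() for plain unicast and
 * batadv_send_skb_via_tt_4addr() for unicast 4addr packets, which supply
 * the matching packet_type for this function.
 */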

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}

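/**
 * batadv_schedule_bat_ogm - schedule an originator message for transmission
 * @hard_iface: the interface the OGM is to be sent on
 *
 * Marks an interface that is still pending activation as active and hands
 * the actual scheduling over to the routing algorithm via bat_ogm_schedule().
 */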
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid a race between the
	 * moment the interface is activated in hardif_activate_interface(),
	 * where the originator mac is set, and outdated packets (especially
	 * ones with uninitialized mac addresses) in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

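/* free a forwarding packet and release the references it holds on its
 * incoming and outgoing hard interfaces
 */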
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_free_ref(forw_packet->if_outgoing);
	kfree(forw_packet);
}

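/* enqueue the forwarding packet on the broadcast list and arm the delayed
 * work that will (re)transmit it once send_time has elapsed
 */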
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

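/* delayed work callback: take the broadcast packet off the queue, send a
 * copy of it on every hard interface attached to this soft interface and
 * re-queue it until it has been rebroadcast often enough
 */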
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

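/**
 * batadv_send_outstanding_bat_ogm_packet - emit a queued OGM
 * @work: work queue item
 *
 * Delayed work callback: removes the OGM from the forward list, hands it to
 * the routing algorithm via bat_ogm_emit() and, if this is the node's own
 * OGM on its originating interface, schedules the next one.
 */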
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

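/**
 * batadv_purge_outstanding_packets - stop and free queued packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: restrict the purge to this interface, or NULL to purge all
 *
 * Cancels the pending delayed work of the queued broadcast and OGM packets
 * (optionally limited to one interface) and frees every entry that was still
 * waiting to be sent.
 */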
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->bcast_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			if (!forw_packet->own)
				atomic_inc(&bat_priv->batman_queue_left);

			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}