This source file includes the following definitions:
- x25_forward_call
- x25_forward_data
- x25_clear_forward_by_lci
- x25_clear_forward_by_dev
1
2
3
4
5
6
7 #define pr_fmt(fmt) "X25: " fmt
8
9 #include <linux/if_arp.h>
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <net/x25.h>
13
14 LIST_HEAD(x25_forward_list);
15 DEFINE_RWLOCK(x25_forward_list_lock);
16
17 int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
18 struct sk_buff *skb, int lci)
19 {
20 struct x25_route *rt;
21 struct x25_neigh *neigh_new = NULL;
22 struct list_head *entry;
23 struct x25_forward *x25_frwd, *new_frwd;
24 struct sk_buff *skbn;
25 short same_lci = 0;
26 int rc = 0;
27
28 if ((rt = x25_get_route(dest_addr)) == NULL)
29 goto out_no_route;
30
31 if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
32
33
34
35 goto out_put_route;
36 }
37
38
39
40
41 if (rt->dev == from->dev) {
42 goto out_put_nb;
43 }
44
45
46
47
48 read_lock_bh(&x25_forward_list_lock);
49 list_for_each(entry, &x25_forward_list) {
50 x25_frwd = list_entry(entry, struct x25_forward, node);
51 if (x25_frwd->lci == lci) {
52 pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
53 same_lci = 1;
54 }
55 }
56 read_unlock_bh(&x25_forward_list_lock);
57
58
59 if (!same_lci){
60 if ((new_frwd = kmalloc(sizeof(struct x25_forward),
61 GFP_ATOMIC)) == NULL){
62 rc = -ENOMEM;
63 goto out_put_nb;
64 }
65 new_frwd->lci = lci;
66 new_frwd->dev1 = rt->dev;
67 new_frwd->dev2 = from->dev;
68 write_lock_bh(&x25_forward_list_lock);
69 list_add(&new_frwd->node, &x25_forward_list);
70 write_unlock_bh(&x25_forward_list_lock);
71 }
72
73
74 if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
75 goto out_put_nb;
76 }
77 x25_transmit_link(skbn, neigh_new);
78 rc = 1;
79
80
81 out_put_nb:
82 x25_neigh_put(neigh_new);
83
84 out_put_route:
85 x25_route_put(rt);
86
87 out_no_route:
88 return rc;
89 }
90
91
92 int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
93
94 struct x25_forward *frwd;
95 struct list_head *entry;
96 struct net_device *peer = NULL;
97 struct x25_neigh *nb;
98 struct sk_buff *skbn;
99 int rc = 0;
100
101 read_lock_bh(&x25_forward_list_lock);
102 list_for_each(entry, &x25_forward_list) {
103 frwd = list_entry(entry, struct x25_forward, node);
104 if (frwd->lci == lci) {
105
106 if (from->dev == frwd->dev1) {
107 peer = frwd->dev2;
108 } else {
109 peer = frwd->dev1;
110 }
111 break;
112 }
113 }
114 read_unlock_bh(&x25_forward_list_lock);
115
116 if ( (nb = x25_get_neigh(peer)) == NULL)
117 goto out;
118
119 if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
120 goto output;
121
122 }
123 x25_transmit_link(skbn, nb);
124
125 rc = 1;
126 output:
127 x25_neigh_put(nb);
128 out:
129 return rc;
130 }
131
132 void x25_clear_forward_by_lci(unsigned int lci)
133 {
134 struct x25_forward *fwd;
135 struct list_head *entry, *tmp;
136
137 write_lock_bh(&x25_forward_list_lock);
138
139 list_for_each_safe(entry, tmp, &x25_forward_list) {
140 fwd = list_entry(entry, struct x25_forward, node);
141 if (fwd->lci == lci) {
142 list_del(&fwd->node);
143 kfree(fwd);
144 }
145 }
146 write_unlock_bh(&x25_forward_list_lock);
147 }
148
149
150 void x25_clear_forward_by_dev(struct net_device *dev)
151 {
152 struct x25_forward *fwd;
153 struct list_head *entry, *tmp;
154
155 write_lock_bh(&x25_forward_list_lock);
156
157 list_for_each_safe(entry, tmp, &x25_forward_list) {
158 fwd = list_entry(entry, struct x25_forward, node);
159 if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
160 list_del(&fwd->node);
161 kfree(fwd);
162 }
163 }
164 write_unlock_bh(&x25_forward_list_lock);
165 }