This source file includes the following definitions:
- nfp_ccm_all_tags_busy
- nfp_ccm_alloc_tag
- nfp_ccm_free_tag
- __nfp_ccm_reply
- nfp_ccm_reply
- nfp_ccm_reply_drop_tag
- nfp_ccm_wait_reply
- nfp_ccm_communicate
- nfp_ccm_rx
- nfp_ccm_init
- nfp_ccm_clean
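
The file implements a simple tag-based request/reply protocol over the control channel: a caller hands a pre-built control message to nfp_ccm_communicate(), which stamps the header with a freshly allocated tag, transmits it, and sleeps until nfp_ccm_rx() queues the matching reply. A minimal caller sketch follows; the example_ccm_request() wrapper, its payload handling, and the use of the driver's nfp_app_ctrl_msg_alloc() helper are illustrative assumptions, not code from this file.

static struct sk_buff *
example_ccm_request(struct nfp_ccm *ccm, enum nfp_ccm_type type,
		    void *req, unsigned int req_size, unsigned int reply_size)
{
	unsigned int size = sizeof(struct nfp_ccm_hdr) + req_size;
	struct sk_buff *skb;

	/* Allocate a control message buffer with room for header + payload. */
	skb = nfp_app_ctrl_msg_alloc(ccm->app, size, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	skb_put(skb, size);

	/* The payload follows the CCM header; nfp_ccm_communicate() fills in
	 * the header fields (version, type, tag) itself.
	 */
	memcpy(skb->data + sizeof(struct nfp_ccm_hdr), req, req_size);

	/* Transmits the request and blocks for the matching reply;
	 * returns the reply skb or an ERR_PTR() on failure.
	 */
	return nfp_ccm_communicate(ccm, skb, type, reply_size);
}
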
#include <linux/bitops.h>

#include "ccm.h"
#include "nfp_app.h"
#include "nfp_net.h"

#define ccm_warn(app, msg...)	nn_dp_warn(&(app)->ctrl->dp, msg)

#define NFP_CCM_TAG_ALLOC_SPAN	(U16_MAX / 4)

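/* Tags are handed out as a moving window over the u16 space: tag_alloc_next
 * is the next tag to allocate and tag_alloc_last trails it past tags that
 * have been freed.  The unsigned subtraction below is wrap-around safe, and
 * allocation is refused once more than NFP_CCM_TAG_ALLOC_SPAN tags are
 * outstanding.
 */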
static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
{
	u16 used_tags;

	used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;

	return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
}

static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
{
	/* Called with the control message lock held (see
	 * nfp_ccm_communicate()); refuse the allocation if the window of
	 * outstanding tags is full.
	 */
	if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
		ccm_warn(ccm->app, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
	return ccm->tag_alloc_next++;
}

static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));

	while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
	       ccm->tag_alloc_last != ccm->tag_alloc_next)
		ccm->tag_alloc_last++;
}

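/* Must be called with the control message lock held.  Searches the reply
 * queue for a message with a matching tag; on a hit the tag is released and
 * the skb is unlinked and returned, otherwise NULL.
 */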
static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&ccm->replies, skb) {
		msg_tag = nfp_ccm_get_tag(skb);
		if (msg_tag == tag) {
			nfp_ccm_free_tag(ccm, tag);
			__skb_unlink(skb, &ccm->replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *
nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(app->ctrl);
	skb = __nfp_ccm_reply(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return skb;
}

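/* Like nfp_ccm_reply(), but if no reply has arrived the tag is freed anyway,
 * so a reply that shows up later is treated as unexpected and dropped.
 */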
static struct sk_buff *
nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(app->ctrl);
	skb = __nfp_ccm_reply(ccm, tag);
	if (!skb)
		nfp_ccm_free_tag(ccm, tag);
	nfp_ctrl_unlock(app->ctrl);

	return skb;
}

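/* Busy-poll briefly for fast firmware responses, then sleep on the wait
 * queue with a 5 second timeout.
 */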
static struct sk_buff *
nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
		   enum nfp_ccm_type type, int tag)
{
	struct sk_buff *skb;
	int i, err;

	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_ccm_reply(ccm, app, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(ccm->wq,
					       skb = nfp_ccm_reply(ccm, app, tag),
					       msecs_to_jiffies(5000));
	/* If the wait ended without a reply, make one final check and drop
	 * the tag so a late reply gets discarded rather than matched.
	 */
	if (!skb)
		skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
	if (err < 0) {
		ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
			 err == -ERESTARTSYS ? "interrupted" : "error",
			 type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

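/* Send a request over the control channel and wait for the matching reply.
 * Consumes @skb in all cases; returns the reply skb on success or an
 * ERR_PTR() on error.
 */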
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
		    enum nfp_ccm_type type, unsigned int reply_size)
{
	struct nfp_app *app = ccm->app;
	struct nfp_ccm_hdr *hdr;
	int reply_type, tag;

	nfp_ctrl_lock(app->ctrl);
	tag = nfp_ccm_alloc_tag(ccm);
	if (tag < 0) {
		nfp_ctrl_unlock(app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(app, skb);

	nfp_ctrl_unlock(app->ctrl);

	skb = nfp_ccm_wait_reply(ccm, app, type, tag);
	if (IS_ERR(skb))
		return skb;

	reply_type = nfp_ccm_get_type(skb);
	if (reply_type != __NFP_CCM_REPLY(type)) {
		ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			 reply_type, __NFP_CCM_REPLY(type));
		goto err_free;
	}

	/* A reply_size of 0 means the caller validates the length itself. */
	if (reply_size && skb->len != reply_size) {
		ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			 type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

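/* Entry point for control messages received from the device.  Messages that
 * are too short or carry a tag no one is waiting for are dropped; otherwise
 * the skb is queued on the reply list and waiters are woken.
 */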
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
{
	struct nfp_app *app = ccm->app;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
		ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(app->ctrl);

	tag = nfp_ccm_get_tag(skb);
	if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
		ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
			 tag);
		goto err_unlock;
	}

	__skb_queue_tail(&ccm->replies, skb);
	wake_up_interruptible_all(&ccm->wq);

	nfp_ctrl_unlock(app->ctrl);
	return;

err_unlock:
	nfp_ctrl_unlock(app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}

int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
{
	ccm->app = app;
	skb_queue_head_init(&ccm->replies);
	init_waitqueue_head(&ccm->wq);
	return 0;
}

void nfp_ccm_clean(struct nfp_ccm *ccm)
{
	WARN_ON(!skb_queue_empty(&ccm->replies));
}
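
For context, a rough sketch of how an app is expected to wire these entry points together, assuming a hypothetical embedding structure; the names below are illustrative and not part of this file.

/* Hypothetical embedding app - illustrative only. */
struct example_app_priv {
	struct nfp_app *app;
	struct nfp_ccm ccm;
};

static int example_app_start(struct example_app_priv *priv)
{
	/* Set up the reply queue, wait queue and tag allocator state. */
	return nfp_ccm_init(&priv->ccm, priv->app);
}

static void example_app_ctrl_msg_rx(struct example_app_priv *priv,
				    struct sk_buff *skb)
{
	/* Feed every received control message to the CCM layer; it consumes
	 * the skb (queues it for a waiter or frees it).
	 */
	nfp_ccm_rx(&priv->ccm, skb);
}

static void example_app_stop(struct example_app_priv *priv)
{
	/* All requests must have completed; warns if replies are left over. */
	nfp_ccm_clean(&priv->ccm);
}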