This source file includes the following definitions.
- mt76x02u_multiple_mcu_reads
- mt76x02u_mcu_wait_resp
- __mt76x02u_mcu_send_msg
- mt76x02u_mcu_send_msg
- skb_put_le32
- mt76x02u_mcu_wr_rp
- mt76x02u_mcu_rd_rp
- mt76x02u_mcu_fw_reset
- __mt76x02u_mcu_fw_send_data
- mt76x02u_mcu_fw_send_data
- mt76x02u_init_mcu
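Together these implement the USB MCU command path (building MCU messages, sending them over the bulk endpoint and waiting for completion events), batched register reads and writes through the MCU, and firmware upload for mt76x02-based USB devices.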
#include <linux/module.h>
#include <linux/firmware.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"

#define MT_CMD_HDR_LEN				4

#define MT_FCE_DMA_ADDR				0x0230
#define MT_FCE_DMA_LEN				0x0234

#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX		0x09a8

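/*
 * Parse the register values returned by a batched MCU read and copy them
 * back into the caller's mt76_reg_pair array (usb->mcu.rp).
 *
 * Two response layouts are handled: in burst mode the payload is a plain
 * array of 32-bit values for consecutive registers starting at rp[0].reg;
 * otherwise each entry is an 8-byte (address, value) pair whose address is
 * checked against the expected register offset.
 */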
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	u32 reg, val;
	int i;

	if (usb->mcu.burst) {
		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);

		reg = usb->mcu.rp[0].reg - usb->mcu.base;
		for (i = 0; i < usb->mcu.rp_len; i++) {
			val = get_unaligned_le32(data + 4 * i);
			usb->mcu.rp[i].reg = reg++;
			usb->mcu.rp[i].value = val;
		}
	} else {
		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

		for (i = 0; i < usb->mcu.rp_len; i++) {
			reg = get_unaligned_le32(data + 8 * i) -
			      usb->mcu.base;
			val = get_unaligned_le32(data + 8 * i + 4);

			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
			usb->mcu.rp[i].value = val;
		}
	}
}

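/*
 * Wait for an MCU command completion event. The response buffer is polled
 * up to five times (300 ms per attempt); a response is accepted when its
 * sequence number matches @seq and the event type is EVT_CMD_DONE. If a
 * batched register read is pending (usb->mcu.rp is set), the returned
 * values are parsed first.
 */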
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	u8 *data = usb->mcu.data;
	int i, len, ret;
	u32 rxfce;

	for (i = 0; i < 5; i++) {
		ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300);
		if (ret == -ETIMEDOUT)
			continue;
		if (ret)
			goto out;

		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);

		rxfce = get_unaligned_le32(data);
		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}
out:
	dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
	return ret;
}

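/*
 * Send one MCU command that has already been packed into an skb: prepend
 * the DMA info word (command type plus sequence number), push the buffer
 * out over USB and, if requested, wait for the matching completion event.
 * Sequence number 0 is reserved for commands that do not expect a reply.
 * Must be called with usb->mcu.mutex held.
 */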
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	struct mt76_usb *usb = &dev->usb;
	int ret;
	u8 seq = 0;
	u32 info;

	if (test_bit(MT76_REMOVED, &dev->state))
		return 0;

	if (wait_resp) {
		seq = ++usb->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++usb->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		return ret;

	ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500);
	if (ret)
		return ret;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

	consume_skb(skb);

	return ret;
}

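/*
 * mcu_send_msg hook: copy the command payload into a freshly allocated skb
 * (with room for the 4-byte command header and trailing padding) and send
 * it with the MCU mutex held, so commands are serialized per device.
 */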
static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
		      int len, bool wait_resp)
{
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int err;

	skb = mt76_mcu_msg_alloc(data, MT_CMD_HDR_LEN, len, 8);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&usb->mcu.mutex);
	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
	mutex_unlock(&usb->mcu.mutex);

	return err;
}

static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

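/*
 * mcu_wr_rp hook: write a list of (register, value) pairs through the MCU
 * using CMD_RANDOM_WRITE. The list is split into chunks that fit into a
 * single in-band packet (MT_INBAND_PACKET_MAX_LEN / 8 pairs) and sent
 * recursively; only the final chunk waits for a completion event.
 */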
static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
		   const struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_WRITE = 12;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&usb->mcu.mutex);
	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	mutex_unlock(&usb->mcu.mutex);
	if (ret)
		return ret;

	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
}

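/*
 * mcu_rd_rp hook: read a list of registers through the MCU using
 * CMD_RANDOM_READ. The whole request must fit into one in-band packet.
 * The expected register pairs are published in usb->mcu.rp before the
 * command is sent so that mt76x02u_mcu_wait_resp() can fill in the values
 * from the response.
 */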
static int
mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
		   struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_READ = 10;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&usb->mcu.mutex);

	usb->mcu.rp = data;
	usb->mcu.rp_len = n;
	usb->mcu.base = base;
	usb->mcu.burst = false;

	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);

	usb->mcu.rp = NULL;

	mutex_unlock(&usb->mcu.mutex);

	return ret;
}

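/*
 * Reset the MCU via the MT_VEND_DEV_MODE vendor request, typically issued
 * by the chip-specific driver before a firmware upload.
 */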
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);

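/*
 * Upload a single firmware chunk: build a DMA header (port, length and
 * command type) in front of the payload, program the FCE DMA address and
 * length registers with the target offset, push the buffer over USB and
 * bump the TX CPU descriptor index so the MCU consumes the chunk.
 */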
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
			    const void *fw_data, int len, u32 dst_addr)
{
	__le32 info;
	u32 val;
	int err, data_len;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	data_len = MT_CMD_HDR_LEN + len + sizeof(info);

	err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000);
	if (err) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
		return err;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}

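/*
 * Upload a firmware image in chunks of at most @max_payload - 8 bytes
 * (leaving room for the 4-byte DMA header and 4 bytes of padding),
 * sleeping briefly between chunks to give the MCU time to process them.
 */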
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	int len, err = 0, pos = 0, max_len = max_payload - 8;
	u8 *buf;

	buf = kmalloc(max_payload, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		usleep_range(5000, 10000);
	}
	kfree(buf);

	return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);

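/*
 * Install the USB-specific MCU operations on a mt76_dev. A rough sketch of
 * how a chip driver would wire this up during probe; the exact flow, the
 * max-payload value and the download offset live in the chip-specific
 * mt76x0u/mt76x2u code and are only illustrative here:
 *
 *	mt76x02u_init_mcu(&dev->mt76);
 *	mt76x02u_mcu_fw_reset(dev);
 *	err = mt76x02u_mcu_fw_send_data(dev, fw->data, fw->size,
 *					max_payload, dl_offset);
 */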
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
		.mcu_send_msg = mt76x02u_mcu_send_msg,
		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
	};

	dev->mcu_ops = &mt76x02u_mcu_ops;
}
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");