This source file includes the following definitions:
- bm_ccsr_in
- bm_ccsr_out
- bm_get_version
- bm_set_memory
- bman_fbpr
- bman_isr
- bman_is_probed
- bman_requires_cleanup
- bman_done_cleanup
- fsl_bman_probe
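/*
 * BMan CCSR (global configuration/control register) setup: maps the BMan
 * register block, programs the FBPR private memory base/size registers,
 * and installs the error interrupt handler.
 */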
#include "bman_priv.h"

u16 bman_ip_rev;
EXPORT_SYMBOL(bman_ip_rev);

/* Register offsets within the BMan CCSR space */
#define REG_FBPR_FPC 0x0800
#define REG_ECSR 0x0a00
#define REG_ECIR 0x0a04
#define REG_EADR 0x0a08
#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
#define REG_IP_REV_1 0x0bf8
#define REG_IP_REV_2 0x0bfc
#define REG_FBPR_BARE 0x0c00
#define REG_FBPR_BAR 0x0c04
#define REG_FBPR_AR 0x0c10
#define REG_SRCIDR 0x0d04
#define REG_LIODNR 0x0d08
#define REG_ERR_ISR 0x0e00
#define REG_ERR_IER 0x0e04
#define REG_ERR_ISDR 0x0e08

/* Error interrupt source bits, used with the ERR_ISR/IER/ISDR and ECSR registers */
#define BM_EIRQ_IVCI 0x00000010
#define BM_EIRQ_FLWI 0x00000008
#define BM_EIRQ_MBEI 0x00000004
#define BM_EIRQ_SBEI 0x00000002
#define BM_EIRQ_BSCN 0x00000001

struct bman_hwerr_txt {
        u32 mask;
        const char *txt;
};

static const struct bman_hwerr_txt bman_hwerr_txts[] = {
        { BM_EIRQ_IVCI, "Invalid Command Verb" },
        { BM_EIRQ_FLWI, "FBPR Low Watermark" },
        { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
        { BM_EIRQ_SBEI, "Single-bit ECC Error" },
        { BM_EIRQ_BSCN, "Pool State Change Notification" },
};

/* Error conditions that are masked in the IER after their first report */
#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI

/* CCSR register space for the BMan block, mapped in fsl_bman_probe() */
static u32 __iomem *bm_ccsr_start;

static inline u32 bm_ccsr_in(u32 offset)
{
        return ioread32be(bm_ccsr_start + offset/4);
}
static inline void bm_ccsr_out(u32 offset, u32 val)
{
        iowrite32be(val, bm_ccsr_start + offset/4);
}

static void bm_get_version(u16 *id, u8 *major, u8 *minor)
{
        u32 v = bm_ccsr_in(REG_IP_REV_1);

        *id = (v >> 16);
        *major = (v >> 8) & 0xff;
        *minor = v & 0xff;
}
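/*
 * Worked example (hypothetical raw value): if REG_IP_REV_1 read back as
 * 0x0a190201, bm_get_version() would return id = 0x0a19, major = 2,
 * minor = 1, which fsl_bman_probe() below maps to BMAN_REV21.
 */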

#define FBPR_AR_RPRIO_HI BIT(30)

/* Probe state: 0 = not yet probed, 1 = probed OK, -1 = probe failed */
static int __bman_probed;
/* Set when the FBPR BAR is found already programmed (see bm_set_memory()) */
static int __bman_requires_cleanup;

static int bm_set_memory(u64 ba, u32 size)
{
        u32 bar, bare;
        u32 exp = ilog2(size);

        /* choke if size is out of range or not a power of 2 */
        DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
                    is_power_of_2(size));
        /* choke if 'ba' has lower alignment than 'size' */
        DPAA_ASSERT(!(ba & (size - 1)));

        /* Check whether BMan has already been initialized */
        bar = bm_ccsr_in(REG_FBPR_BAR);
        if (bar) {
                /* Make sure 'ba' matches what was previously programmed */
                bare = bm_ccsr_in(REG_FBPR_BARE);
                if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
                        pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
                               ba, bare, bar);
                        return -ENOMEM;
                }
                pr_info("BMan BAR already configured\n");
                __bman_requires_cleanup = 1;
                return 1;
        }

        bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
        bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
        bm_ccsr_out(REG_FBPR_AR, exp - 1);
        return 0;
}
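/*
 * Illustrative sketch, not part of the driver: how the FBPR_AR exponent
 * encoding used by bm_set_memory() works out for a concrete size. The
 * helper name fbpr_ar_encode() is hypothetical.
 */
#if 0
static u32 fbpr_ar_encode(u32 size)
{
        /* bm_set_memory() writes ilog2(size) - 1 into REG_FBPR_AR */
        return ilog2(size) - 1;
}
/* e.g. a 16 MiB FBPR area (0x1000000 bytes) encodes as 24 - 1 = 23 */
#endif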
/*
 * FBPR (Free Buffer Proxy Record) memory is either carved out by the
 * device-tree reserved-memory framework via the "fsl,bman-fbpr" node
 * handled below, or allocated at probe time by qbman_init_private_mem().
 */
static dma_addr_t fbpr_a;
static size_t fbpr_sz;

static int bman_fbpr(struct reserved_mem *rmem)
{
        fbpr_a = rmem->base;
        fbpr_sz = rmem->size;

        WARN_ON(!(fbpr_a && fbpr_sz));

        return 0;
}
RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);

static irqreturn_t bman_isr(int irq, void *ptr)
{
        u32 isr_val, ier_val, ecsr_val, isr_mask, i;
        struct device *dev = ptr;

        ier_val = bm_ccsr_in(REG_ERR_IER);
        isr_val = bm_ccsr_in(REG_ERR_ISR);
        ecsr_val = bm_ccsr_in(REG_ECSR);
        isr_mask = isr_val & ier_val;

        if (!isr_mask)
                return IRQ_NONE;

        for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
                if (bman_hwerr_txts[i].mask & isr_mask) {
                        dev_err_ratelimited(dev, "ErrInt: %s\n",
                                            bman_hwerr_txts[i].txt);
                        if (bman_hwerr_txts[i].mask & ecsr_val) {
                                /* Re-arm the error capture registers */
                                bm_ccsr_out(REG_ECSR, ecsr_val);
                        }
                        if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
                                dev_dbg(dev, "Disabling error 0x%x\n",
                                        bman_hwerr_txts[i].mask);
                                ier_val &= ~bman_hwerr_txts[i].mask;
                                bm_ccsr_out(REG_ERR_IER, ier_val);
                        }
                }
        }
        bm_ccsr_out(REG_ERR_ISR, isr_val);

        return IRQ_HANDLED;
}

int bman_is_probed(void)
{
        return __bman_probed;
}
EXPORT_SYMBOL_GPL(bman_is_probed);

int bman_requires_cleanup(void)
{
        return __bman_requires_cleanup;
}

void bman_done_cleanup(void)
{
        __bman_requires_cleanup = 0;
}
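/*
 * Illustrative sketch, not code from this file: how portal/pool init code
 * might consume the cleanup flag exposed above. bman_shutdown_pool() is
 * assumed to be provided by the companion BMan code; the drain loop is a
 * simplified assumption.
 */
#if 0
static void example_drain_leftover_pools(void)
{
        int bpid;

        if (!bman_requires_cleanup())
                return;

        /* Release any buffer pools the hardware was left holding */
        for (bpid = 0; bpid < BM_POOL_MAX; bpid++)
                bman_shutdown_pool(bpid);

        bman_done_cleanup();
}
#endif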

static int fsl_bman_probe(struct platform_device *pdev)
{
        int ret, err_irq;
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        struct resource *res;
        u16 id, bm_pool_cnt;
        u8 major, minor;

        __bman_probed = -1;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
                        node);
                return -ENXIO;
        }
        bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
        if (!bm_ccsr_start)
                return -ENXIO;

        bm_get_version(&id, &major, &minor);
        if (major == 1 && minor == 0) {
                bman_ip_rev = BMAN_REV10;
                bm_pool_cnt = BM_POOL_MAX;
        } else if (major == 2 && minor == 0) {
                bman_ip_rev = BMAN_REV20;
                bm_pool_cnt = 8;
        } else if (major == 2 && minor == 1) {
                bman_ip_rev = BMAN_REV21;
                bm_pool_cnt = BM_POOL_MAX;
        } else {
                dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
                        id, major, minor);
                return -ENODEV;
        }

        /*
         * If no "fsl,bman-fbpr" reserved-memory region supplied the FBPR
         * area, allocate it now.
         */
        if (!fbpr_a) {
                ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
                if (ret) {
                        dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
                                ret);
                        return -ENODEV;
                }
        }

        dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);

        bm_set_memory(fbpr_a, fbpr_sz);

        err_irq = platform_get_irq(pdev, 0);
        if (err_irq <= 0) {
                dev_info(dev, "Can't get %pOF IRQ\n", node);
                return -ENODEV;
        }
        ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
                               dev);
        if (ret) {
                dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
                        ret, node);
                return ret;
        }

        /* Mask Pool State Change notifications in the status disable register */
        bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
        /*
         * Write-to-clear any stale status bits, e.g. starvation asserted
         * before resources were allocated during driver init.
         */
        bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
        /* Enable error interrupts */
        bm_ccsr_out(REG_ERR_IER, 0xffffffff);

        bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
        if (IS_ERR(bm_bpalloc)) {
                ret = PTR_ERR(bm_bpalloc);
                dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
                return ret;
        }

        /* Seed the BMan buffer-pool ID allocator */
        ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
        if (ret) {
                dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
                        0, bm_pool_cnt - 1, ret);
                return ret;
        }

        __bman_probed = 1;

        return 0;
}

static const struct of_device_id fsl_bman_ids[] = {
        {
                .compatible = "fsl,bman",
        },
        {}
};

static struct platform_driver fsl_bman_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = fsl_bman_ids,
                .suppress_bind_attrs = true,
        },
        .probe = fsl_bman_probe,
};

builtin_platform_driver(fsl_bman_driver);
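/*
 * Illustrative sketch, not part of this driver: how a DPAA client driver
 * would typically gate its own probe on bman_is_probed(). The function name
 * example_client_probe() is hypothetical; the defer-on-zero / fail-on-negative
 * pattern mirrors existing BMan consumers.
 */
#if 0
static int example_client_probe(struct platform_device *pdev)
{
        int ret = bman_is_probed();

        if (!ret)
                return -EPROBE_DEFER;   /* BMan core not probed yet */
        if (ret < 0) {
                dev_err(&pdev->dev, "failing probe due to BMan probe error\n");
                return -ENODEV;
        }

        /* ... remainder of the client probe ... */
        return 0;
}
#endif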