This source file includes the following definitions:
- llcc_slice_getd
- llcc_slice_putd
- llcc_update_act_ctrl
- llcc_slice_activate
- llcc_slice_deactivate
- llcc_get_slice_id
- llcc_get_slice_size
- qcom_llcc_cfg_program
- qcom_llcc_remove
- qcom_llcc_init_mmio
- qcom_llcc_probe
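
Usage sketch (illustrative, not part of this file): a client driver obtains its slice descriptor by use-case ID, activates the slice, and releases the descriptor when done. The use-case IDs live in include/linux/soc/qcom/llcc-qcom.h; LLCC_GPUHTW is assumed here purely as an example.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/llcc-qcom.h>

static int example_use_llcc_slice(void)
{
        struct llcc_slice_desc *desc;
        int ret;

        /* Look up the slice configured for this use case (example ID). */
        desc = llcc_slice_getd(LLCC_GPUHTW);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        /* Program and enable the slice in the LLCC hardware. */
        ret = llcc_slice_activate(desc);
        if (ret) {
                llcc_slice_putd(desc);
                return ret;
        }

        pr_info("using LLCC slice %d (%zu kB)\n",
                llcc_get_slice_id(desc), llcc_get_slice_size(desc));

        /* Disable the slice and free the descriptor when done. */
        llcc_slice_deactivate(desc);
        llcc_slice_putd(desc);
        return 0;
}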
// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm Last Level Cache Controller (LLCC) driver.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/llcc-qcom.h>

#define ACTIVATE                      BIT(0)
#define DEACTIVATE                    BIT(1)
#define ACT_CTRL_OPCODE_ACTIVATE      BIT(0)
#define ACT_CTRL_OPCODE_DEACTIVATE    BIT(1)
#define ACT_CTRL_ACT_TRIG             BIT(0)
#define ACT_CTRL_OPCODE_SHIFT         0x01
#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
#define ATTR1_FIXED_SIZE_SHIFT        0x03
#define ATTR1_PRIORITY_SHIFT          0x04
#define ATTR1_MAX_CAP_SHIFT           0x10
#define ATTR0_RES_WAYS_MASK           GENMASK(11, 0)
#define ATTR0_BONUS_WAYS_MASK         GENMASK(27, 16)
#define ATTR0_BONUS_WAYS_SHIFT        0x10
#define LLCC_STATUS_READ_DELAY        100

#define CACHE_LINE_SIZE_SHIFT         6

#define LLCC_COMMON_STATUS0           0x0003000c
#define LLCC_LB_CNT_MASK              GENMASK(31, 28)
#define LLCC_LB_CNT_SHIFT             28

#define MAX_CAP_TO_BYTES(n)           (n * SZ_1K)
#define LLCC_TRP_ACT_CTRLn(n)         (n * SZ_4K)
#define LLCC_TRP_STATUSn(n)           (4 + n * SZ_4K)
#define LLCC_TRP_ATTR0_CFGn(n)        (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n)        (0x21004 + SZ_8 * n)

#define BANK_OFFSET_STRIDE            0x80000

static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;

static struct regmap_config llcc_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
        .fast_io = true,
};

/**
 * llcc_slice_getd - get llcc slice descriptor
 * @uid: usecase_id of the client
 *
 * Returns a pointer to the llcc slice descriptor on success and
 * an ERR_PTR on failure.
 */
struct llcc_slice_desc *llcc_slice_getd(u32 uid)
{
        const struct llcc_slice_config *cfg;
        struct llcc_slice_desc *desc;
        u32 sz, count;

        if (IS_ERR(drv_data))
                return ERR_CAST(drv_data);

        cfg = drv_data->cfg;
        sz = drv_data->cfg_size;

        for (count = 0; cfg && count < sz; count++, cfg++)
                if (cfg->usecase_id == uid)
                        break;

        if (count == sz || !cfg)
                return ERR_PTR(-ENODEV);

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return ERR_PTR(-ENOMEM);

        desc->slice_id = cfg->slice_id;
        desc->slice_size = cfg->max_cap;

        return desc;
}
EXPORT_SYMBOL_GPL(llcc_slice_getd);

/**
 * llcc_slice_putd - release a llcc slice descriptor
 * @desc: Pointer to llcc slice descriptor
 */
void llcc_slice_putd(struct llcc_slice_desc *desc)
{
        if (!IS_ERR_OR_NULL(desc))
                kfree(desc);
}
EXPORT_SYMBOL_GPL(llcc_slice_putd);

static int llcc_update_act_ctrl(u32 sid,
                                u32 act_ctrl_reg_val, u32 status)
{
        u32 act_ctrl_reg;
        u32 status_reg;
        u32 slice_status;
        int ret;

        if (IS_ERR(drv_data))
                return PTR_ERR(drv_data);

        act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
        status_reg = LLCC_TRP_STATUSn(sid);

        /* Set the ACTIVE trigger */
        act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
        ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg,
                           act_ctrl_reg_val);
        if (ret)
                return ret;

        /* Clear the ACTIVE trigger */
        act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
        ret = regmap_write(drv_data->bcast_regmap, act_ctrl_reg,
                           act_ctrl_reg_val);
        if (ret)
                return ret;

        ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
                                       slice_status, !(slice_status & status),
                                       0, LLCC_STATUS_READ_DELAY);
        return ret;
}

/**
 * llcc_slice_activate - Activate the llcc slice
 * @desc: Pointer to llcc slice descriptor
 *
 * Returns zero on success and a negative errno on failure.
 */
int llcc_slice_activate(struct llcc_slice_desc *desc)
{
        int ret;
        u32 act_ctrl_val;

        if (IS_ERR(drv_data))
                return PTR_ERR(drv_data);

        if (IS_ERR_OR_NULL(desc))
                return -EINVAL;

        mutex_lock(&drv_data->lock);
        if (test_bit(desc->slice_id, drv_data->bitmap)) {
                mutex_unlock(&drv_data->lock);
                return 0;
        }

        act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;

        ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
                                   DEACTIVATE);
        if (ret) {
                mutex_unlock(&drv_data->lock);
                return ret;
        }

        __set_bit(desc->slice_id, drv_data->bitmap);
        mutex_unlock(&drv_data->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(llcc_slice_activate);

/**
 * llcc_slice_deactivate - Deactivate the llcc slice
 * @desc: Pointer to llcc slice descriptor
 *
 * Returns zero on success and a negative errno on failure.
 */
int llcc_slice_deactivate(struct llcc_slice_desc *desc)
{
        u32 act_ctrl_val;
        int ret;

        if (IS_ERR(drv_data))
                return PTR_ERR(drv_data);

        if (IS_ERR_OR_NULL(desc))
                return -EINVAL;

        mutex_lock(&drv_data->lock);
        if (!test_bit(desc->slice_id, drv_data->bitmap)) {
                mutex_unlock(&drv_data->lock);
                return 0;
        }
        act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;

        ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
                                   ACTIVATE);
        if (ret) {
                mutex_unlock(&drv_data->lock);
                return ret;
        }

        __clear_bit(desc->slice_id, drv_data->bitmap);
        mutex_unlock(&drv_data->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(llcc_slice_deactivate);

/**
 * llcc_get_slice_id - return the slice id
 * @desc: Pointer to llcc slice descriptor
 */
int llcc_get_slice_id(struct llcc_slice_desc *desc)
{
        if (IS_ERR_OR_NULL(desc))
                return -EINVAL;

        return desc->slice_id;
}
EXPORT_SYMBOL_GPL(llcc_get_slice_id);

/**
 * llcc_get_slice_size - return the size of the slice
 * @desc: Pointer to llcc slice descriptor
 */
size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
{
        if (IS_ERR_OR_NULL(desc))
                return 0;

        return desc->slice_size;
}
EXPORT_SYMBOL_GPL(llcc_get_slice_size);

static int qcom_llcc_cfg_program(struct platform_device *pdev)
{
        int i;
        u32 attr1_cfg;
        u32 attr0_cfg;
        u32 attr1_val;
        u32 attr0_val;
        u32 max_cap_cacheline;
        u32 sz;
        int ret = 0;
        const struct llcc_slice_config *llcc_table;
        struct llcc_slice_desc desc;

        sz = drv_data->cfg_size;
        llcc_table = drv_data->cfg;

        for (i = 0; i < sz; i++) {
                attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
                attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);

                attr1_val = llcc_table[i].cache_mode;
                attr1_val |= llcc_table[i].probe_target_ways <<
                                ATTR1_PROBE_TARGET_WAYS_SHIFT;
                attr1_val |= llcc_table[i].fixed_size <<
                                ATTR1_FIXED_SIZE_SHIFT;
                attr1_val |= llcc_table[i].priority <<
                                ATTR1_PRIORITY_SHIFT;

                max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);

                /*
                 * The slice capacity is distributed equally across the LLCC
                 * banks, and the hardware expects it in cache-line units, so
                 * divide the max capacity by the number of banks and convert
                 * it to cache-line granularity.
                 */
                max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
                max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
                attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;

                attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
                attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;

                ret = regmap_write(drv_data->bcast_regmap, attr1_cfg,
                                   attr1_val);
                if (ret)
                        return ret;
                ret = regmap_write(drv_data->bcast_regmap, attr0_cfg,
                                   attr0_val);
                if (ret)
                        return ret;
                if (llcc_table[i].activate_on_init) {
                        desc.slice_id = llcc_table[i].slice_id;
                        ret = llcc_slice_activate(&desc);
                }
        }
        return ret;
}

int qcom_llcc_remove(struct platform_device *pdev)
{
        /* Reset the global pointer so later API calls fail gracefully */
        drv_data = ERR_PTR(-ENODEV);
        return 0;
}
EXPORT_SYMBOL_GPL(qcom_llcc_remove);

static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
                                          const char *name)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (!res)
                return ERR_PTR(-ENODEV);

        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return ERR_CAST(base);

        llcc_regmap_config.name = name;
        return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
}

int qcom_llcc_probe(struct platform_device *pdev,
                    const struct llcc_slice_config *llcc_cfg, u32 sz)
{
        u32 num_banks;
        struct device *dev = &pdev->dev;
        int ret, i;
        struct platform_device *llcc_edac;

        drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
        if (!drv_data) {
                ret = -ENOMEM;
                goto err;
        }

        drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
        if (IS_ERR(drv_data->regmap)) {
                ret = PTR_ERR(drv_data->regmap);
                goto err;
        }

        drv_data->bcast_regmap =
                qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
        if (IS_ERR(drv_data->bcast_regmap)) {
                ret = PTR_ERR(drv_data->bcast_regmap);
                goto err;
        }

        ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
                          &num_banks);
        if (ret)
                goto err;

        num_banks &= LLCC_LB_CNT_MASK;
        num_banks >>= LLCC_LB_CNT_SHIFT;
        drv_data->num_banks = num_banks;

        for (i = 0; i < sz; i++)
                if (llcc_cfg[i].slice_id > drv_data->max_slices)
                        drv_data->max_slices = llcc_cfg[i].slice_id;

        drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
                                         GFP_KERNEL);
        if (!drv_data->offsets) {
                ret = -ENOMEM;
                goto err;
        }

        for (i = 0; i < num_banks; i++)
                drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;

        drv_data->bitmap = devm_kcalloc(dev,
                BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
                GFP_KERNEL);
        if (!drv_data->bitmap) {
                ret = -ENOMEM;
                goto err;
        }

        drv_data->cfg = llcc_cfg;
        drv_data->cfg_size = sz;
        mutex_init(&drv_data->lock);
        platform_set_drvdata(pdev, drv_data);

        ret = qcom_llcc_cfg_program(pdev);
        if (ret)
                goto err;

        drv_data->ecc_irq = platform_get_irq(pdev, 0);
        if (drv_data->ecc_irq >= 0) {
                llcc_edac = platform_device_register_data(&pdev->dev,
                                                "qcom_llcc_edac", -1, drv_data,
                                                sizeof(*drv_data));
                if (IS_ERR(llcc_edac))
                        dev_err(dev, "Failed to register llcc edac driver\n");
        }

        return 0;
err:
        drv_data = ERR_PTR(-ENODEV);
        return ret;
}
EXPORT_SYMBOL_GPL(qcom_llcc_probe);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Last Level Cache Controller");