This source file includes the following definitions:
- nvmem_reg_read
- nvmem_reg_write
- nvmem_release
- of_nvmem_find
- nvmem_find
- nvmem_cell_drop
- nvmem_device_remove_all_cells
- nvmem_cell_add
- nvmem_cell_info_to_nvmem_cell
- nvmem_add_cells
- nvmem_register_notifier
- nvmem_unregister_notifier
- nvmem_add_cells_from_table
- nvmem_find_cell_by_name
- nvmem_add_cells_from_of
- nvmem_register
- nvmem_device_release
- nvmem_unregister
- devm_nvmem_release
- devm_nvmem_register
- devm_nvmem_match
- devm_nvmem_unregister
- __nvmem_device_get
- __nvmem_device_put
- of_nvmem_device_get
- nvmem_device_get
- devm_nvmem_device_match
- devm_nvmem_device_release
- devm_nvmem_device_put
- nvmem_device_put
- devm_nvmem_device_get
- nvmem_cell_get_from_lookup
- nvmem_find_cell_by_node
- of_nvmem_cell_get
- nvmem_cell_get
- devm_nvmem_cell_release
- devm_nvmem_cell_get
- devm_nvmem_cell_match
- devm_nvmem_cell_put
- nvmem_cell_put
- nvmem_shift_read_buffer_in_place
- __nvmem_cell_read
- nvmem_cell_read
- nvmem_cell_prepare_write_buffer
- nvmem_cell_write
- nvmem_cell_read_u16
- nvmem_cell_read_u32
- nvmem_device_cell_read
- nvmem_device_cell_write
- nvmem_device_read
- nvmem_device_write
- nvmem_add_cell_table
- nvmem_del_cell_table
- nvmem_add_cell_lookups
- nvmem_del_cell_lookups
- nvmem_dev_name
- nvmem_init
- nvmem_exit
// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "nvmem.h"

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}
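
/*
 * For example (illustrative numbers only): a cell with bit_offset = 2 and
 * nbits = 10 covers bits 2..11 of the region, so the code above computes
 * bytes = DIV_ROUND_UP(10 + 2, 8) = 2 raw bytes that must be transferred
 * before the bitfield can be shifted into place.
 */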

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
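
/*
 * A minimal sketch of a notifier user (hypothetical names, not part of
 * this file): the callback runs on the chain above for NVMEM_ADD,
 * NVMEM_REMOVE, NVMEM_CELL_ADD and NVMEM_CELL_REMOVE events.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_CELL_ADD)
 *			pr_debug("nvmem cell added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */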

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);

			/* drop the references taken above before bailing out */
			of_node_put(cell->np);
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}
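
/*
 * Illustrative device tree fragment for the parser above (a sketch with
 * hypothetical nodes, not taken from this file): each child of the
 * provider node gives "reg" as <byte-offset byte-size> and may narrow
 * the cell to a bitfield with "bits" as <bit-offset nbits>:
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		calib: calib@10 {
 *			reg = <0x10 0x2>;
 *			bits = <2 10>;
 *		};
 *	};
 */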

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
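
/*
 * A minimal provider sketch (hypothetical foo driver, not part of this
 * file): fill a struct nvmem_config with the backing device, geometry and
 * raw accessors, then hand it to nvmem_register() from probe().
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		memcpy(val, foo->shadow + offset, bytes);
 *		return 0;
 *	}
 *
 *	static struct nvmem_config foo_nvmem_config = {
 *		.name = "foo-efuse",
 *		.stride = 4,
 *		.word_size = 4,
 *		.reg_read = foo_reg_read,
 *	};
 *
 * and, from probe(), with foo/pdev being the driver's own data:
 *
 *	foo_nvmem_config.dev = &pdev->dev;
 *	foo_nvmem_config.size = foo->size;
 *	foo_nvmem_config.priv = foo;
 *	nvmem = nvmem_register(&foo_nvmem_config);
 */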

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       const char *nvmem_name)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);
	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put previously got nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem device will be released automatically once the
 * consumer device is unbound.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem device.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* pull low bits of the next byte into the high bits of this one */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
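
/*
 * Worked example for the helper above (illustrative values): for a cell
 * with bit_offset = 2 and nbits = 10, the raw bytes {0xa7, 0x35} hold the
 * little-endian word 0x35a7; shifting bits 2..11 out in place yields
 * (0x35a7 >> 2) & GENMASK(9, 0) = 0x169, so buf becomes {0x69, 0x01}.
 */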

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
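
/*
 * A minimal consumer sketch (hypothetical cell name, not part of this
 * file): look up the cell by its nvmem-cell-names entry (or lookup
 * con_id), read it, then release both the buffer and the cell.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	if (len == ETH_ALEN)
 *		ether_addr_copy(dst, mac);
 *	kfree(mac);
 */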

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* carry the high bits of the previous byte into this one */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
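
/*
 * The u16/u32 helpers above wrap the whole get/read/put sequence for the
 * common case of a register-sized cell, e.g. (hypothetical cell name):
 *
 *	u32 calib;
 *	int ret = nvmem_cell_read_u32(dev, "calibration", &calib);
 */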

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
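
/*
 * Device-level access bypasses cells entirely; a sketch with hypothetical
 * offsets and sizes (not from this file):
 *
 *	u8 id[16];
 *	struct nvmem_device *nvmem;
 *	int ret;
 *
 *	nvmem = devm_nvmem_device_get(dev, NULL);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(id), id);
 *	if (ret != sizeof(id))
 *		return ret < 0 ? ret : -EIO;
 */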

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
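
/*
 * A provider (or board file) can describe cells without device tree via a
 * cell table; a sketch with hypothetical names:
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name = "mac-address",
 *			.offset = 0x40,
 *			.bytes = 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-efuse",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */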

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
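
/*
 * Non-DT consumers are wired to cells through lookup entries, matched by
 * dev_id/con_id in nvmem_cell_get_from_lookup() above; a sketch with
 * hypothetical names:
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-efuse",
 *			.cell_name = "mac-address",
 *			.dev_id = "foo-eth.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */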

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");