root/drivers/base/regmap/regmap.c

DEFINITIONS

This source file includes the following definitions.
  1. regmap_should_log
  2. regmap_should_log
  3. regmap_reg_in_ranges
  4. regmap_check_range_table
  5. regmap_writeable
  6. regmap_cached
  7. regmap_readable
  8. regmap_volatile
  9. regmap_precious
  10. regmap_writeable_noinc
  11. regmap_readable_noinc
  12. regmap_volatile_range
  13. regmap_format_2_6_write
  14. regmap_format_4_12_write
  15. regmap_format_7_9_write
  16. regmap_format_10_14_write
  17. regmap_format_8
  18. regmap_format_16_be
  19. regmap_format_16_le
  20. regmap_format_16_native
  21. regmap_format_24
  22. regmap_format_32_be
  23. regmap_format_32_le
  24. regmap_format_32_native
  25. regmap_format_64_be
  26. regmap_format_64_le
  27. regmap_format_64_native
  28. regmap_parse_inplace_noop
  29. regmap_parse_8
  30. regmap_parse_16_be
  31. regmap_parse_16_le
  32. regmap_parse_16_be_inplace
  33. regmap_parse_16_le_inplace
  34. regmap_parse_16_native
  35. regmap_parse_24
  36. regmap_parse_32_be
  37. regmap_parse_32_le
  38. regmap_parse_32_be_inplace
  39. regmap_parse_32_le_inplace
  40. regmap_parse_32_native
  41. regmap_parse_64_be
  42. regmap_parse_64_le
  43. regmap_parse_64_be_inplace
  44. regmap_parse_64_le_inplace
  45. regmap_parse_64_native
  46. regmap_lock_hwlock
  47. regmap_lock_hwlock_irq
  48. regmap_lock_hwlock_irqsave
  49. regmap_unlock_hwlock
  50. regmap_unlock_hwlock_irq
  51. regmap_unlock_hwlock_irqrestore
  52. regmap_lock_unlock_none
  53. regmap_lock_mutex
  54. regmap_unlock_mutex
  55. regmap_lock_spinlock
  56. regmap_unlock_spinlock
  57. dev_get_regmap_release
  58. _regmap_range_add
  59. _regmap_range_lookup
  60. regmap_range_exit
  61. regmap_attach_dev
  62. regmap_get_reg_endian
  63. regmap_get_val_endian
  64. __regmap_init
  65. devm_regmap_release
  66. __devm_regmap_init
  67. regmap_field_init
  68. devm_regmap_field_alloc
  69. devm_regmap_field_free
  70. regmap_field_alloc
  71. regmap_field_free
  72. regmap_reinit_cache
  73. regmap_exit
  74. dev_get_regmap_match
  75. dev_get_regmap
  76. regmap_get_device
  77. _regmap_select_page
  78. regmap_set_work_buf_flag_mask
  79. _regmap_raw_write_impl
  80. regmap_can_raw_write
  81. regmap_get_raw_read_max
  82. regmap_get_raw_write_max
  83. _regmap_bus_formatted_write
  84. _regmap_bus_reg_write
  85. _regmap_bus_raw_write
  86. _regmap_map_get_context
  87. _regmap_write
  88. regmap_write
  89. regmap_write_async
  90. _regmap_raw_write
  91. regmap_raw_write
  92. regmap_noinc_write
  93. regmap_field_update_bits_base
  94. regmap_fields_update_bits_base
  95. regmap_bulk_write
  96. _regmap_raw_multi_reg_write
  97. _regmap_register_page
  98. _regmap_range_multi_paged_reg_write
  99. _regmap_multi_reg_write
  100. regmap_multi_reg_write
  101. regmap_multi_reg_write_bypassed
  102. regmap_raw_write_async
  103. _regmap_raw_read
  104. _regmap_bus_reg_read
  105. _regmap_bus_read
  106. _regmap_read
  107. regmap_read
  108. regmap_raw_read
  109. regmap_noinc_read
  110. regmap_field_read
  111. regmap_fields_read
  112. regmap_bulk_read
  113. _regmap_update_bits
  114. regmap_update_bits_base
  115. regmap_async_complete_cb
  116. regmap_async_is_done
  117. regmap_async_complete
  118. regmap_register_patch
  119. regmap_get_val_bytes
  120. regmap_get_max_register
  121. regmap_get_reg_stride
  122. regmap_parse_val
  123. regmap_initcall

   1 // SPDX-License-Identifier: GPL-2.0
   2 //
   3 // Register map access API
   4 //
   5 // Copyright 2011 Wolfson Microelectronics plc
   6 //
   7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
   8 
   9 #include <linux/device.h>
  10 #include <linux/slab.h>
  11 #include <linux/export.h>
  12 #include <linux/mutex.h>
  13 #include <linux/err.h>
  14 #include <linux/of.h>
  15 #include <linux/rbtree.h>
  16 #include <linux/sched.h>
  17 #include <linux/delay.h>
  18 #include <linux/log2.h>
  19 #include <linux/hwspinlock.h>
  20 
  21 #define CREATE_TRACE_POINTS
  22 #include "trace.h"
  23 
  24 #include "internal.h"
  25 
  26 /*
  27  * For failures during very early init the trace infrastructure
  28  * sometimes isn't available early enough to be used.  For this
  29  * sort of problem, defining LOG_DEVICE will add printks for basic
  30  * register I/O on a specific device.
  31  */
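/*
 * For example, a hypothetical build-time edit such as
 *
 *      #define LOG_DEVICE "spi0.0"
 *
 * in place of the #undef below would log basic register I/O for the
 * device named "spi0.0" (the device name here is made up).
 */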
  32 #undef LOG_DEVICE
  33 
  34 #ifdef LOG_DEVICE
  35 static inline bool regmap_should_log(struct regmap *map)
  36 {
  37         return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
  38 }
  39 #else
  40 static inline bool regmap_should_log(struct regmap *map) { return false; }
  41 #endif
  42 
  43 
  44 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
  45                                unsigned int mask, unsigned int val,
  46                                bool *change, bool force_write);
  47 
  48 static int _regmap_bus_reg_read(void *context, unsigned int reg,
  49                                 unsigned int *val);
  50 static int _regmap_bus_read(void *context, unsigned int reg,
  51                             unsigned int *val);
  52 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
  53                                        unsigned int val);
  54 static int _regmap_bus_reg_write(void *context, unsigned int reg,
  55                                  unsigned int val);
  56 static int _regmap_bus_raw_write(void *context, unsigned int reg,
  57                                  unsigned int val);
  58 
  59 bool regmap_reg_in_ranges(unsigned int reg,
  60                           const struct regmap_range *ranges,
  61                           unsigned int nranges)
  62 {
  63         const struct regmap_range *r;
  64         int i;
  65 
  66         for (i = 0, r = ranges; i < nranges; i++, r++)
  67                 if (regmap_reg_in_range(reg, r))
  68                         return true;
  69         return false;
  70 }
  71 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
  72 
  73 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
  74                               const struct regmap_access_table *table)
  75 {
  76         /* Check "no ranges" first */
  77         if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
  78                 return false;
  79 
  80         /* In case zero "yes ranges" are supplied, any reg is OK */
  81         if (!table->n_yes_ranges)
  82                 return true;
  83 
  84         return regmap_reg_in_ranges(reg, table->yes_ranges,
  85                                     table->n_yes_ranges);
  86 }
  87 EXPORT_SYMBOL_GPL(regmap_check_range_table);
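
/*
 * Illustrative sketch only (the foo_* names are hypothetical): a driver
 * would typically describe the ranges consulted by regmap_check_range_table()
 * with the regmap_reg_range() helper and point regmap_config->rd_table
 * (or wr_table, volatile_table, ...) at the resulting table.
 *
 *      static const struct regmap_range foo_readable_ranges[] = {
 *              regmap_reg_range(0x00, 0x3f),
 *              regmap_reg_range(0x80, 0x8f),
 *      };
 *
 *      static const struct regmap_access_table foo_rd_table = {
 *              .yes_ranges = foo_readable_ranges,
 *              .n_yes_ranges = ARRAY_SIZE(foo_readable_ranges),
 *      };
 */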
  88 
  89 bool regmap_writeable(struct regmap *map, unsigned int reg)
  90 {
  91         if (map->max_register && reg > map->max_register)
  92                 return false;
  93 
  94         if (map->writeable_reg)
  95                 return map->writeable_reg(map->dev, reg);
  96 
  97         if (map->wr_table)
  98                 return regmap_check_range_table(map, reg, map->wr_table);
  99 
 100         return true;
 101 }
 102 
 103 bool regmap_cached(struct regmap *map, unsigned int reg)
 104 {
 105         int ret;
 106         unsigned int val;
 107 
 108         if (map->cache_type == REGCACHE_NONE)
 109                 return false;
 110 
 111         if (!map->cache_ops)
 112                 return false;
 113 
 114         if (map->max_register && reg > map->max_register)
 115                 return false;
 116 
 117         map->lock(map->lock_arg);
 118         ret = regcache_read(map, reg, &val);
 119         map->unlock(map->lock_arg);
 120         if (ret)
 121                 return false;
 122 
 123         return true;
 124 }
 125 
 126 bool regmap_readable(struct regmap *map, unsigned int reg)
 127 {
 128         if (!map->reg_read)
 129                 return false;
 130 
 131         if (map->max_register && reg > map->max_register)
 132                 return false;
 133 
 134         if (map->format.format_write)
 135                 return false;
 136 
 137         if (map->readable_reg)
 138                 return map->readable_reg(map->dev, reg);
 139 
 140         if (map->rd_table)
 141                 return regmap_check_range_table(map, reg, map->rd_table);
 142 
 143         return true;
 144 }
 145 
 146 bool regmap_volatile(struct regmap *map, unsigned int reg)
 147 {
 148         if (!map->format.format_write && !regmap_readable(map, reg))
 149                 return false;
 150 
 151         if (map->volatile_reg)
 152                 return map->volatile_reg(map->dev, reg);
 153 
 154         if (map->volatile_table)
 155                 return regmap_check_range_table(map, reg, map->volatile_table);
 156 
 157         if (map->cache_ops)
 158                 return false;
 159         else
 160                 return true;
 161 }
 162 
 163 bool regmap_precious(struct regmap *map, unsigned int reg)
 164 {
 165         if (!regmap_readable(map, reg))
 166                 return false;
 167 
 168         if (map->precious_reg)
 169                 return map->precious_reg(map->dev, reg);
 170 
 171         if (map->precious_table)
 172                 return regmap_check_range_table(map, reg, map->precious_table);
 173 
 174         return false;
 175 }
 176 
 177 bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
 178 {
 179         if (map->writeable_noinc_reg)
 180                 return map->writeable_noinc_reg(map->dev, reg);
 181 
 182         if (map->wr_noinc_table)
 183                 return regmap_check_range_table(map, reg, map->wr_noinc_table);
 184 
 185         return true;
 186 }
 187 
 188 bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
 189 {
 190         if (map->readable_noinc_reg)
 191                 return map->readable_noinc_reg(map->dev, reg);
 192 
 193         if (map->rd_noinc_table)
 194                 return regmap_check_range_table(map, reg, map->rd_noinc_table);
 195 
 196         return true;
 197 }
 198 
 199 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
 200         size_t num)
 201 {
 202         unsigned int i;
 203 
 204         for (i = 0; i < num; i++)
 205                 if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
 206                         return false;
 207 
 208         return true;
 209 }
 210 
 211 static void regmap_format_2_6_write(struct regmap *map,
 212                                      unsigned int reg, unsigned int val)
 213 {
 214         u8 *out = map->work_buf;
 215 
 216         *out = (reg << 6) | val;
 217 }
 218 
 219 static void regmap_format_4_12_write(struct regmap *map,
 220                                      unsigned int reg, unsigned int val)
 221 {
 222         __be16 *out = map->work_buf;
 223         *out = cpu_to_be16((reg << 12) | val);
 224 }
 225 
 226 static void regmap_format_7_9_write(struct regmap *map,
 227                                     unsigned int reg, unsigned int val)
 228 {
 229         __be16 *out = map->work_buf;
 230         *out = cpu_to_be16((reg << 9) | val);
 231 }
 232 
 233 static void regmap_format_10_14_write(struct regmap *map,
 234                                     unsigned int reg, unsigned int val)
 235 {
 236         u8 *out = map->work_buf;
 237 
 238         out[2] = val;
 239         out[1] = (val >> 8) | (reg << 6);
 240         out[0] = reg >> 2;
 241 }
 242 
 243 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
 244 {
 245         u8 *b = buf;
 246 
 247         b[0] = val << shift;
 248 }
 249 
 250 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 251 {
 252         __be16 *b = buf;
 253 
 254         b[0] = cpu_to_be16(val << shift);
 255 }
 256 
 257 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
 258 {
 259         __le16 *b = buf;
 260 
 261         b[0] = cpu_to_le16(val << shift);
 262 }
 263 
 264 static void regmap_format_16_native(void *buf, unsigned int val,
 265                                     unsigned int shift)
 266 {
 267         *(u16 *)buf = val << shift;
 268 }
 269 
 270 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
 271 {
 272         u8 *b = buf;
 273 
 274         val <<= shift;
 275 
 276         b[0] = val >> 16;
 277         b[1] = val >> 8;
 278         b[2] = val;
 279 }
 280 
 281 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 282 {
 283         __be32 *b = buf;
 284 
 285         b[0] = cpu_to_be32(val << shift);
 286 }
 287 
 288 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
 289 {
 290         __le32 *b = buf;
 291 
 292         b[0] = cpu_to_le32(val << shift);
 293 }
 294 
 295 static void regmap_format_32_native(void *buf, unsigned int val,
 296                                     unsigned int shift)
 297 {
 298         *(u32 *)buf = val << shift;
 299 }
 300 
 301 #ifdef CONFIG_64BIT
 302 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
 303 {
 304         __be64 *b = buf;
 305 
 306         b[0] = cpu_to_be64((u64)val << shift);
 307 }
 308 
 309 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
 310 {
 311         __le64 *b = buf;
 312 
 313         b[0] = cpu_to_le64((u64)val << shift);
 314 }
 315 
 316 static void regmap_format_64_native(void *buf, unsigned int val,
 317                                     unsigned int shift)
 318 {
 319         *(u64 *)buf = (u64)val << shift;
 320 }
 321 #endif
 322 
 323 static void regmap_parse_inplace_noop(void *buf)
 324 {
 325 }
 326 
 327 static unsigned int regmap_parse_8(const void *buf)
 328 {
 329         const u8 *b = buf;
 330 
 331         return b[0];
 332 }
 333 
 334 static unsigned int regmap_parse_16_be(const void *buf)
 335 {
 336         const __be16 *b = buf;
 337 
 338         return be16_to_cpu(b[0]);
 339 }
 340 
 341 static unsigned int regmap_parse_16_le(const void *buf)
 342 {
 343         const __le16 *b = buf;
 344 
 345         return le16_to_cpu(b[0]);
 346 }
 347 
 348 static void regmap_parse_16_be_inplace(void *buf)
 349 {
 350         __be16 *b = buf;
 351 
 352         b[0] = be16_to_cpu(b[0]);
 353 }
 354 
 355 static void regmap_parse_16_le_inplace(void *buf)
 356 {
 357         __le16 *b = buf;
 358 
 359         b[0] = le16_to_cpu(b[0]);
 360 }
 361 
 362 static unsigned int regmap_parse_16_native(const void *buf)
 363 {
 364         return *(u16 *)buf;
 365 }
 366 
 367 static unsigned int regmap_parse_24(const void *buf)
 368 {
 369         const u8 *b = buf;
 370         unsigned int ret = b[2];
 371         ret |= ((unsigned int)b[1]) << 8;
 372         ret |= ((unsigned int)b[0]) << 16;
 373 
 374         return ret;
 375 }
 376 
 377 static unsigned int regmap_parse_32_be(const void *buf)
 378 {
 379         const __be32 *b = buf;
 380 
 381         return be32_to_cpu(b[0]);
 382 }
 383 
 384 static unsigned int regmap_parse_32_le(const void *buf)
 385 {
 386         const __le32 *b = buf;
 387 
 388         return le32_to_cpu(b[0]);
 389 }
 390 
 391 static void regmap_parse_32_be_inplace(void *buf)
 392 {
 393         __be32 *b = buf;
 394 
 395         b[0] = be32_to_cpu(b[0]);
 396 }
 397 
 398 static void regmap_parse_32_le_inplace(void *buf)
 399 {
 400         __le32 *b = buf;
 401 
 402         b[0] = le32_to_cpu(b[0]);
 403 }
 404 
 405 static unsigned int regmap_parse_32_native(const void *buf)
 406 {
 407         return *(u32 *)buf;
 408 }
 409 
 410 #ifdef CONFIG_64BIT
 411 static unsigned int regmap_parse_64_be(const void *buf)
 412 {
 413         const __be64 *b = buf;
 414 
 415         return be64_to_cpu(b[0]);
 416 }
 417 
 418 static unsigned int regmap_parse_64_le(const void *buf)
 419 {
 420         const __le64 *b = buf;
 421 
 422         return le64_to_cpu(b[0]);
 423 }
 424 
 425 static void regmap_parse_64_be_inplace(void *buf)
 426 {
 427         __be64 *b = buf;
 428 
 429         b[0] = be64_to_cpu(b[0]);
 430 }
 431 
 432 static void regmap_parse_64_le_inplace(void *buf)
 433 {
 434         __le64 *b = buf;
 435 
 436         b[0] = le64_to_cpu(b[0]);
 437 }
 438 
 439 static unsigned int regmap_parse_64_native(const void *buf)
 440 {
 441         return *(u64 *)buf;
 442 }
 443 #endif
 444 
 445 static void regmap_lock_hwlock(void *__map)
 446 {
 447         struct regmap *map = __map;
 448 
 449         hwspin_lock_timeout(map->hwlock, UINT_MAX);
 450 }
 451 
 452 static void regmap_lock_hwlock_irq(void *__map)
 453 {
 454         struct regmap *map = __map;
 455 
 456         hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
 457 }
 458 
 459 static void regmap_lock_hwlock_irqsave(void *__map)
 460 {
 461         struct regmap *map = __map;
 462 
 463         hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
 464                                     &map->spinlock_flags);
 465 }
 466 
 467 static void regmap_unlock_hwlock(void *__map)
 468 {
 469         struct regmap *map = __map;
 470 
 471         hwspin_unlock(map->hwlock);
 472 }
 473 
 474 static void regmap_unlock_hwlock_irq(void *__map)
 475 {
 476         struct regmap *map = __map;
 477 
 478         hwspin_unlock_irq(map->hwlock);
 479 }
 480 
 481 static void regmap_unlock_hwlock_irqrestore(void *__map)
 482 {
 483         struct regmap *map = __map;
 484 
 485         hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
 486 }
 487 
 488 static void regmap_lock_unlock_none(void *__map)
 489 {
 490 
 491 }
 492 
 493 static void regmap_lock_mutex(void *__map)
 494 {
 495         struct regmap *map = __map;
 496         mutex_lock(&map->mutex);
 497 }
 498 
 499 static void regmap_unlock_mutex(void *__map)
 500 {
 501         struct regmap *map = __map;
 502         mutex_unlock(&map->mutex);
 503 }
 504 
 505 static void regmap_lock_spinlock(void *__map)
 506 __acquires(&map->spinlock)
 507 {
 508         struct regmap *map = __map;
 509         unsigned long flags;
 510 
 511         spin_lock_irqsave(&map->spinlock, flags);
 512         map->spinlock_flags = flags;
 513 }
 514 
 515 static void regmap_unlock_spinlock(void *__map)
 516 __releases(&map->spinlock)
 517 {
 518         struct regmap *map = __map;
 519         spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
 520 }
 521 
 522 static void dev_get_regmap_release(struct device *dev, void *res)
 523 {
 524         /*
 525          * We don't actually have anything to do here; the goal is
 526          * not to manage the regmap but to provide a simple way to
 527          * get the regmap back given a struct device.
 528          */
 529 }
 530 
 531 static bool _regmap_range_add(struct regmap *map,
 532                               struct regmap_range_node *data)
 533 {
 534         struct rb_root *root = &map->range_tree;
 535         struct rb_node **new = &(root->rb_node), *parent = NULL;
 536 
 537         while (*new) {
 538                 struct regmap_range_node *this =
 539                         rb_entry(*new, struct regmap_range_node, node);
 540 
 541                 parent = *new;
 542                 if (data->range_max < this->range_min)
 543                         new = &((*new)->rb_left);
 544                 else if (data->range_min > this->range_max)
 545                         new = &((*new)->rb_right);
 546                 else
 547                         return false;
 548         }
 549 
 550         rb_link_node(&data->node, parent, new);
 551         rb_insert_color(&data->node, root);
 552 
 553         return true;
 554 }
 555 
 556 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
 557                                                       unsigned int reg)
 558 {
 559         struct rb_node *node = map->range_tree.rb_node;
 560 
 561         while (node) {
 562                 struct regmap_range_node *this =
 563                         rb_entry(node, struct regmap_range_node, node);
 564 
 565                 if (reg < this->range_min)
 566                         node = node->rb_left;
 567                 else if (reg > this->range_max)
 568                         node = node->rb_right;
 569                 else
 570                         return this;
 571         }
 572 
 573         return NULL;
 574 }
 575 
 576 static void regmap_range_exit(struct regmap *map)
 577 {
 578         struct rb_node *next;
 579         struct regmap_range_node *range_node;
 580 
 581         next = rb_first(&map->range_tree);
 582         while (next) {
 583                 range_node = rb_entry(next, struct regmap_range_node, node);
 584                 next = rb_next(&range_node->node);
 585                 rb_erase(&range_node->node, &map->range_tree);
 586                 kfree(range_node);
 587         }
 588 
 589         kfree(map->selector_work_buf);
 590 }
 591 
 592 int regmap_attach_dev(struct device *dev, struct regmap *map,
 593                       const struct regmap_config *config)
 594 {
 595         struct regmap **m;
 596 
 597         map->dev = dev;
 598 
 599         regmap_debugfs_init(map, config->name);
 600 
 601         /* Add a devres resource for dev_get_regmap() */
 602         m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
 603         if (!m) {
 604                 regmap_debugfs_exit(map);
 605                 return -ENOMEM;
 606         }
 607         *m = map;
 608         devres_add(dev, m);
 609 
 610         return 0;
 611 }
 612 EXPORT_SYMBOL_GPL(regmap_attach_dev);
 613 
 614 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
 615                                         const struct regmap_config *config)
 616 {
 617         enum regmap_endian endian;
 618 
 619         /* Retrieve the endianness specification from the regmap config */
 620         endian = config->reg_format_endian;
 621 
 622         /* If the regmap config specified a non-default value, use that */
 623         if (endian != REGMAP_ENDIAN_DEFAULT)
 624                 return endian;
 625 
 626         /* Retrieve the endianness specification from the bus config */
 627         if (bus && bus->reg_format_endian_default)
 628                 endian = bus->reg_format_endian_default;
 629 
 630         /* If the bus specified a non-default value, use that */
 631         if (endian != REGMAP_ENDIAN_DEFAULT)
 632                 return endian;
 633 
 634         /* Use this if no other value was found */
 635         return REGMAP_ENDIAN_BIG;
 636 }
 637 
 638 enum regmap_endian regmap_get_val_endian(struct device *dev,
 639                                          const struct regmap_bus *bus,
 640                                          const struct regmap_config *config)
 641 {
 642         struct device_node *np;
 643         enum regmap_endian endian;
 644 
 645         /* Retrieve the endianness specification from the regmap config */
 646         endian = config->val_format_endian;
 647 
 648         /* If the regmap config specified a non-default value, use that */
 649         if (endian != REGMAP_ENDIAN_DEFAULT)
 650                 return endian;
 651 
 652         /* If dev and dev->of_node exist, try to get the endianness from DT */
 653         if (dev && dev->of_node) {
 654                 np = dev->of_node;
 655 
 656                 /* Parse the device's DT node for an endianness specification */
 657                 if (of_property_read_bool(np, "big-endian"))
 658                         endian = REGMAP_ENDIAN_BIG;
 659                 else if (of_property_read_bool(np, "little-endian"))
 660                         endian = REGMAP_ENDIAN_LITTLE;
 661                 else if (of_property_read_bool(np, "native-endian"))
 662                         endian = REGMAP_ENDIAN_NATIVE;
 663 
 664                 /* If the endianness was specified in DT, use that */
 665                 if (endian != REGMAP_ENDIAN_DEFAULT)
 666                         return endian;
 667         }
 668 
 669         /* Retrieve the endianness specification from the bus config */
 670         if (bus && bus->val_format_endian_default)
 671                 endian = bus->val_format_endian_default;
 672 
 673         /* If the bus specified a non-default value, use that */
 674         if (endian != REGMAP_ENDIAN_DEFAULT)
 675                 return endian;
 676 
 677         /* Use this if no other value was found */
 678         return REGMAP_ENDIAN_BIG;
 679 }
 680 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
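
/*
 * Illustrative precedence sketch: a driver can force the value endianness
 * from its regmap_config, e.g.
 *
 *      .val_format_endian = REGMAP_ENDIAN_LITTLE,
 *
 * otherwise a "big-endian", "little-endian" or "native-endian" property on
 * the device's DT node is honoured, then the bus default, and finally
 * REGMAP_ENDIAN_BIG is assumed.
 */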
 681 
 682 struct regmap *__regmap_init(struct device *dev,
 683                              const struct regmap_bus *bus,
 684                              void *bus_context,
 685                              const struct regmap_config *config,
 686                              struct lock_class_key *lock_key,
 687                              const char *lock_name)
 688 {
 689         struct regmap *map;
 690         int ret = -EINVAL;
 691         enum regmap_endian reg_endian, val_endian;
 692         int i, j;
 693 
 694         if (!config)
 695                 goto err;
 696 
 697         map = kzalloc(sizeof(*map), GFP_KERNEL);
 698         if (map == NULL) {
 699                 ret = -ENOMEM;
 700                 goto err;
 701         }
 702 
 703         if (config->name) {
 704                 map->name = kstrdup_const(config->name, GFP_KERNEL);
 705                 if (!map->name) {
 706                         ret = -ENOMEM;
 707                         goto err_map;
 708                 }
 709         }
 710 
 711         if (config->disable_locking) {
 712                 map->lock = map->unlock = regmap_lock_unlock_none;
 713                 regmap_debugfs_disable(map);
 714         } else if (config->lock && config->unlock) {
 715                 map->lock = config->lock;
 716                 map->unlock = config->unlock;
 717                 map->lock_arg = config->lock_arg;
 718         } else if (config->use_hwlock) {
 719                 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
 720                 if (!map->hwlock) {
 721                         ret = -ENXIO;
 722                         goto err_name;
 723                 }
 724 
 725                 switch (config->hwlock_mode) {
 726                 case HWLOCK_IRQSTATE:
 727                         map->lock = regmap_lock_hwlock_irqsave;
 728                         map->unlock = regmap_unlock_hwlock_irqrestore;
 729                         break;
 730                 case HWLOCK_IRQ:
 731                         map->lock = regmap_lock_hwlock_irq;
 732                         map->unlock = regmap_unlock_hwlock_irq;
 733                         break;
 734                 default:
 735                         map->lock = regmap_lock_hwlock;
 736                         map->unlock = regmap_unlock_hwlock;
 737                         break;
 738                 }
 739 
 740                 map->lock_arg = map;
 741         } else {
 742                 if ((bus && bus->fast_io) ||
 743                     config->fast_io) {
 744                         spin_lock_init(&map->spinlock);
 745                         map->lock = regmap_lock_spinlock;
 746                         map->unlock = regmap_unlock_spinlock;
 747                         lockdep_set_class_and_name(&map->spinlock,
 748                                                    lock_key, lock_name);
 749                 } else {
 750                         mutex_init(&map->mutex);
 751                         map->lock = regmap_lock_mutex;
 752                         map->unlock = regmap_unlock_mutex;
 753                         lockdep_set_class_and_name(&map->mutex,
 754                                                    lock_key, lock_name);
 755                 }
 756                 map->lock_arg = map;
 757         }
 758 
 759         /*
 760          * When we write in fast paths with regmap_bulk_write(), don't allocate
 761          * scratch buffers with sleeping allocations.
 762          */
 763         if ((bus && bus->fast_io) || config->fast_io)
 764                 map->alloc_flags = GFP_ATOMIC;
 765         else
 766                 map->alloc_flags = GFP_KERNEL;
 767 
 768         map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
 769         map->format.pad_bytes = config->pad_bits / 8;
 770         map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
 771         map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
 772                         config->val_bits + config->pad_bits, 8);
 773         map->reg_shift = config->pad_bits % 8;
 774         if (config->reg_stride)
 775                 map->reg_stride = config->reg_stride;
 776         else
 777                 map->reg_stride = 1;
 778         if (is_power_of_2(map->reg_stride))
 779                 map->reg_stride_order = ilog2(map->reg_stride);
 780         else
 781                 map->reg_stride_order = -1;
 782         map->use_single_read = config->use_single_read || !bus || !bus->read;
 783         map->use_single_write = config->use_single_write || !bus || !bus->write;
 784         map->can_multi_write = config->can_multi_write && bus && bus->write;
 785         if (bus) {
 786                 map->max_raw_read = bus->max_raw_read;
 787                 map->max_raw_write = bus->max_raw_write;
 788         }
 789         map->dev = dev;
 790         map->bus = bus;
 791         map->bus_context = bus_context;
 792         map->max_register = config->max_register;
 793         map->wr_table = config->wr_table;
 794         map->rd_table = config->rd_table;
 795         map->volatile_table = config->volatile_table;
 796         map->precious_table = config->precious_table;
 797         map->wr_noinc_table = config->wr_noinc_table;
 798         map->rd_noinc_table = config->rd_noinc_table;
 799         map->writeable_reg = config->writeable_reg;
 800         map->readable_reg = config->readable_reg;
 801         map->volatile_reg = config->volatile_reg;
 802         map->precious_reg = config->precious_reg;
 803         map->writeable_noinc_reg = config->writeable_noinc_reg;
 804         map->readable_noinc_reg = config->readable_noinc_reg;
 805         map->cache_type = config->cache_type;
 806 
 807         spin_lock_init(&map->async_lock);
 808         INIT_LIST_HEAD(&map->async_list);
 809         INIT_LIST_HEAD(&map->async_free);
 810         init_waitqueue_head(&map->async_waitq);
 811 
 812         if (config->read_flag_mask ||
 813             config->write_flag_mask ||
 814             config->zero_flag_mask) {
 815                 map->read_flag_mask = config->read_flag_mask;
 816                 map->write_flag_mask = config->write_flag_mask;
 817         } else if (bus) {
 818                 map->read_flag_mask = bus->read_flag_mask;
 819         }
 820 
 821         if (!bus) {
 822                 map->reg_read  = config->reg_read;
 823                 map->reg_write = config->reg_write;
 824 
 825                 map->defer_caching = false;
 826                 goto skip_format_initialization;
 827         } else if (!bus->read || !bus->write) {
 828                 map->reg_read = _regmap_bus_reg_read;
 829                 map->reg_write = _regmap_bus_reg_write;
 830 
 831                 map->defer_caching = false;
 832                 goto skip_format_initialization;
 833         } else {
 834                 map->reg_read  = _regmap_bus_read;
 835                 map->reg_update_bits = bus->reg_update_bits;
 836         }
 837 
 838         reg_endian = regmap_get_reg_endian(bus, config);
 839         val_endian = regmap_get_val_endian(dev, bus, config);
 840 
 841         switch (config->reg_bits + map->reg_shift) {
 842         case 2:
 843                 switch (config->val_bits) {
 844                 case 6:
 845                         map->format.format_write = regmap_format_2_6_write;
 846                         break;
 847                 default:
 848                         goto err_hwlock;
 849                 }
 850                 break;
 851 
 852         case 4:
 853                 switch (config->val_bits) {
 854                 case 12:
 855                         map->format.format_write = regmap_format_4_12_write;
 856                         break;
 857                 default:
 858                         goto err_hwlock;
 859                 }
 860                 break;
 861 
 862         case 7:
 863                 switch (config->val_bits) {
 864                 case 9:
 865                         map->format.format_write = regmap_format_7_9_write;
 866                         break;
 867                 default:
 868                         goto err_hwlock;
 869                 }
 870                 break;
 871 
 872         case 10:
 873                 switch (config->val_bits) {
 874                 case 14:
 875                         map->format.format_write = regmap_format_10_14_write;
 876                         break;
 877                 default:
 878                         goto err_hwlock;
 879                 }
 880                 break;
 881 
 882         case 8:
 883                 map->format.format_reg = regmap_format_8;
 884                 break;
 885 
 886         case 16:
 887                 switch (reg_endian) {
 888                 case REGMAP_ENDIAN_BIG:
 889                         map->format.format_reg = regmap_format_16_be;
 890                         break;
 891                 case REGMAP_ENDIAN_LITTLE:
 892                         map->format.format_reg = regmap_format_16_le;
 893                         break;
 894                 case REGMAP_ENDIAN_NATIVE:
 895                         map->format.format_reg = regmap_format_16_native;
 896                         break;
 897                 default:
 898                         goto err_hwlock;
 899                 }
 900                 break;
 901 
 902         case 24:
 903                 if (reg_endian != REGMAP_ENDIAN_BIG)
 904                         goto err_hwlock;
 905                 map->format.format_reg = regmap_format_24;
 906                 break;
 907 
 908         case 32:
 909                 switch (reg_endian) {
 910                 case REGMAP_ENDIAN_BIG:
 911                         map->format.format_reg = regmap_format_32_be;
 912                         break;
 913                 case REGMAP_ENDIAN_LITTLE:
 914                         map->format.format_reg = regmap_format_32_le;
 915                         break;
 916                 case REGMAP_ENDIAN_NATIVE:
 917                         map->format.format_reg = regmap_format_32_native;
 918                         break;
 919                 default:
 920                         goto err_hwlock;
 921                 }
 922                 break;
 923 
 924 #ifdef CONFIG_64BIT
 925         case 64:
 926                 switch (reg_endian) {
 927                 case REGMAP_ENDIAN_BIG:
 928                         map->format.format_reg = regmap_format_64_be;
 929                         break;
 930                 case REGMAP_ENDIAN_LITTLE:
 931                         map->format.format_reg = regmap_format_64_le;
 932                         break;
 933                 case REGMAP_ENDIAN_NATIVE:
 934                         map->format.format_reg = regmap_format_64_native;
 935                         break;
 936                 default:
 937                         goto err_hwlock;
 938                 }
 939                 break;
 940 #endif
 941 
 942         default:
 943                 goto err_hwlock;
 944         }
 945 
 946         if (val_endian == REGMAP_ENDIAN_NATIVE)
 947                 map->format.parse_inplace = regmap_parse_inplace_noop;
 948 
 949         switch (config->val_bits) {
 950         case 8:
 951                 map->format.format_val = regmap_format_8;
 952                 map->format.parse_val = regmap_parse_8;
 953                 map->format.parse_inplace = regmap_parse_inplace_noop;
 954                 break;
 955         case 16:
 956                 switch (val_endian) {
 957                 case REGMAP_ENDIAN_BIG:
 958                         map->format.format_val = regmap_format_16_be;
 959                         map->format.parse_val = regmap_parse_16_be;
 960                         map->format.parse_inplace = regmap_parse_16_be_inplace;
 961                         break;
 962                 case REGMAP_ENDIAN_LITTLE:
 963                         map->format.format_val = regmap_format_16_le;
 964                         map->format.parse_val = regmap_parse_16_le;
 965                         map->format.parse_inplace = regmap_parse_16_le_inplace;
 966                         break;
 967                 case REGMAP_ENDIAN_NATIVE:
 968                         map->format.format_val = regmap_format_16_native;
 969                         map->format.parse_val = regmap_parse_16_native;
 970                         break;
 971                 default:
 972                         goto err_hwlock;
 973                 }
 974                 break;
 975         case 24:
 976                 if (val_endian != REGMAP_ENDIAN_BIG)
 977                         goto err_hwlock;
 978                 map->format.format_val = regmap_format_24;
 979                 map->format.parse_val = regmap_parse_24;
 980                 break;
 981         case 32:
 982                 switch (val_endian) {
 983                 case REGMAP_ENDIAN_BIG:
 984                         map->format.format_val = regmap_format_32_be;
 985                         map->format.parse_val = regmap_parse_32_be;
 986                         map->format.parse_inplace = regmap_parse_32_be_inplace;
 987                         break;
 988                 case REGMAP_ENDIAN_LITTLE:
 989                         map->format.format_val = regmap_format_32_le;
 990                         map->format.parse_val = regmap_parse_32_le;
 991                         map->format.parse_inplace = regmap_parse_32_le_inplace;
 992                         break;
 993                 case REGMAP_ENDIAN_NATIVE:
 994                         map->format.format_val = regmap_format_32_native;
 995                         map->format.parse_val = regmap_parse_32_native;
 996                         break;
 997                 default:
 998                         goto err_hwlock;
 999                 }
1000                 break;
1001 #ifdef CONFIG_64BIT
1002         case 64:
1003                 switch (val_endian) {
1004                 case REGMAP_ENDIAN_BIG:
1005                         map->format.format_val = regmap_format_64_be;
1006                         map->format.parse_val = regmap_parse_64_be;
1007                         map->format.parse_inplace = regmap_parse_64_be_inplace;
1008                         break;
1009                 case REGMAP_ENDIAN_LITTLE:
1010                         map->format.format_val = regmap_format_64_le;
1011                         map->format.parse_val = regmap_parse_64_le;
1012                         map->format.parse_inplace = regmap_parse_64_le_inplace;
1013                         break;
1014                 case REGMAP_ENDIAN_NATIVE:
1015                         map->format.format_val = regmap_format_64_native;
1016                         map->format.parse_val = regmap_parse_64_native;
1017                         break;
1018                 default:
1019                         goto err_hwlock;
1020                 }
1021                 break;
1022 #endif
1023         }
1024 
1025         if (map->format.format_write) {
1026                 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1027                     (val_endian != REGMAP_ENDIAN_BIG))
1028                         goto err_hwlock;
1029                 map->use_single_write = true;
1030         }
1031 
1032         if (!map->format.format_write &&
1033             !(map->format.format_reg && map->format.format_val))
1034                 goto err_hwlock;
1035 
1036         map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1037         if (map->work_buf == NULL) {
1038                 ret = -ENOMEM;
1039                 goto err_hwlock;
1040         }
1041 
1042         if (map->format.format_write) {
1043                 map->defer_caching = false;
1044                 map->reg_write = _regmap_bus_formatted_write;
1045         } else if (map->format.format_val) {
1046                 map->defer_caching = true;
1047                 map->reg_write = _regmap_bus_raw_write;
1048         }
1049 
1050 skip_format_initialization:
1051 
1052         map->range_tree = RB_ROOT;
1053         for (i = 0; i < config->num_ranges; i++) {
1054                 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1055                 struct regmap_range_node *new;
1056 
1057                 /* Sanity check */
1058                 if (range_cfg->range_max < range_cfg->range_min) {
1059                         dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
1060                                 range_cfg->range_max, range_cfg->range_min);
1061                         goto err_range;
1062                 }
1063 
1064                 if (range_cfg->range_max > map->max_register) {
1065                         dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
1066                                 range_cfg->range_max, map->max_register);
1067                         goto err_range;
1068                 }
1069 
1070                 if (range_cfg->selector_reg > map->max_register) {
1071                         dev_err(map->dev,
1072                                 "Invalid range %d: selector out of map\n", i);
1073                         goto err_range;
1074                 }
1075 
1076                 if (range_cfg->window_len == 0) {
1077                         dev_err(map->dev, "Invalid range %d: window_len 0\n",
1078                                 i);
1079                         goto err_range;
1080                 }
1081 
1082                 /* Make sure that this register range has no selector
1083                    or data window within its boundary */
1084                 for (j = 0; j < config->num_ranges; j++) {
1085                         unsigned sel_reg = config->ranges[j].selector_reg;
1086                         unsigned win_min = config->ranges[j].window_start;
1087                         unsigned win_max = win_min +
1088                                            config->ranges[j].window_len - 1;
1089 
1090                         /* Allow data window inside its own virtual range */
1091                         if (j == i)
1092                                 continue;
1093 
1094                         if (range_cfg->range_min <= sel_reg &&
1095                             sel_reg <= range_cfg->range_max) {
1096                                 dev_err(map->dev,
1097                                         "Range %d: selector for %d in window\n",
1098                                         i, j);
1099                                 goto err_range;
1100                         }
1101 
1102                         if (!(win_max < range_cfg->range_min ||
1103                               win_min > range_cfg->range_max)) {
1104                                 dev_err(map->dev,
1105                                         "Range %d: window for %d in window\n",
1106                                         i, j);
1107                                 goto err_range;
1108                         }
1109                 }
1110 
1111                 new = kzalloc(sizeof(*new), GFP_KERNEL);
1112                 if (new == NULL) {
1113                         ret = -ENOMEM;
1114                         goto err_range;
1115                 }
1116 
1117                 new->map = map;
1118                 new->name = range_cfg->name;
1119                 new->range_min = range_cfg->range_min;
1120                 new->range_max = range_cfg->range_max;
1121                 new->selector_reg = range_cfg->selector_reg;
1122                 new->selector_mask = range_cfg->selector_mask;
1123                 new->selector_shift = range_cfg->selector_shift;
1124                 new->window_start = range_cfg->window_start;
1125                 new->window_len = range_cfg->window_len;
1126 
1127                 if (!_regmap_range_add(map, new)) {
1128                         dev_err(map->dev, "Failed to add range %d\n", i);
1129                         kfree(new);
1130                         goto err_range;
1131                 }
1132 
1133                 if (map->selector_work_buf == NULL) {
1134                         map->selector_work_buf =
1135                                 kzalloc(map->format.buf_size, GFP_KERNEL);
1136                         if (map->selector_work_buf == NULL) {
1137                                 ret = -ENOMEM;
1138                                 goto err_range;
1139                         }
1140                 }
1141         }
1142 
1143         ret = regcache_init(map, config);
1144         if (ret != 0)
1145                 goto err_range;
1146 
1147         if (dev) {
1148                 ret = regmap_attach_dev(dev, map, config);
1149                 if (ret != 0)
1150                         goto err_regcache;
1151         } else {
1152                 regmap_debugfs_init(map, config->name);
1153         }
1154 
1155         return map;
1156 
1157 err_regcache:
1158         regcache_exit(map);
1159 err_range:
1160         regmap_range_exit(map);
1161         kfree(map->work_buf);
1162 err_hwlock:
1163         if (map->hwlock)
1164                 hwspin_lock_free(map->hwlock);
1165 err_name:
1166         kfree_const(map->name);
1167 err_map:
1168         kfree(map);
1169 err:
1170         return ERR_PTR(ret);
1171 }
1172 EXPORT_SYMBOL_GPL(__regmap_init);
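
/*
 * Illustrative sketch (foo_* is hypothetical): a minimal regmap_config as
 * consumed by __regmap_init().  reg_bits/val_bits select the formatting
 * helpers above, max_register bounds the regmap_writeable()/regmap_readable()
 * checks, and cache_type picks the regcache implementation.
 *
 *      static const struct regmap_config foo_regmap_config = {
 *              .reg_bits = 8,
 *              .val_bits = 16,
 *              .max_register = 0x7f,
 *              .cache_type = REGCACHE_RBTREE,
 *      };
 */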
1173 
1174 static void devm_regmap_release(struct device *dev, void *res)
1175 {
1176         regmap_exit(*(struct regmap **)res);
1177 }
1178 
1179 struct regmap *__devm_regmap_init(struct device *dev,
1180                                   const struct regmap_bus *bus,
1181                                   void *bus_context,
1182                                   const struct regmap_config *config,
1183                                   struct lock_class_key *lock_key,
1184                                   const char *lock_name)
1185 {
1186         struct regmap **ptr, *regmap;
1187 
1188         ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1189         if (!ptr)
1190                 return ERR_PTR(-ENOMEM);
1191 
1192         regmap = __regmap_init(dev, bus, bus_context, config,
1193                                lock_key, lock_name);
1194         if (!IS_ERR(regmap)) {
1195                 *ptr = regmap;
1196                 devres_add(dev, ptr);
1197         } else {
1198                 devres_free(ptr);
1199         }
1200 
1201         return regmap;
1202 }
1203 EXPORT_SYMBOL_GPL(__devm_regmap_init);
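
/*
 * Illustrative usage sketch (hypothetical I2C driver, foo_* made up): the
 * bus-specific wrappers such as devm_regmap_init_i2c() funnel into
 * __devm_regmap_init(), so the map is torn down automatically when the
 * driver detaches.
 *
 *      map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *      if (IS_ERR(map))
 *              return PTR_ERR(map);
 */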
1204 
1205 static void regmap_field_init(struct regmap_field *rm_field,
1206         struct regmap *regmap, struct reg_field reg_field)
1207 {
1208         rm_field->regmap = regmap;
1209         rm_field->reg = reg_field.reg;
1210         rm_field->shift = reg_field.lsb;
1211         rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1212         rm_field->id_size = reg_field.id_size;
1213         rm_field->id_offset = reg_field.id_offset;
1214 }
1215 
1216 /**
1217  * devm_regmap_field_alloc() - Allocate and initialise a register field.
1218  *
1219  * @dev: Device that will be interacted with
1220  * @regmap: regmap bank in which this register field is located.
1221  * @reg_field: Register field within the bank.
1222  *
1223  * The return value will be an ERR_PTR() on error or a valid pointer
1224  * to a struct regmap_field. The regmap_field will be automatically freed
1225  * by the device management code.
1226  */
1227 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1228                 struct regmap *regmap, struct reg_field reg_field)
1229 {
1230         struct regmap_field *rm_field = devm_kzalloc(dev,
1231                                         sizeof(*rm_field), GFP_KERNEL);
1232         if (!rm_field)
1233                 return ERR_PTR(-ENOMEM);
1234 
1235         regmap_field_init(rm_field, regmap, reg_field);
1236 
1237         return rm_field;
1238 
1239 }
1240 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
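
/*
 * Illustrative sketch (the register layout is hypothetical): REG_FIELD()
 * describes bits 4..7 of register 0x20, and the resulting field is then
 * accessed with the regmap_field_read()/regmap_field_write() helpers.
 *
 *      static const struct reg_field foo_mode_field = REG_FIELD(0x20, 4, 7);
 *
 *      field = devm_regmap_field_alloc(dev, map, foo_mode_field);
 *      if (IS_ERR(field))
 *              return PTR_ERR(field);
 *      ret = regmap_field_write(field, 0x3);
 */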
1241 
1242 /**
1243  * devm_regmap_field_free() - Free a register field allocated using
1244  *                            devm_regmap_field_alloc.
1245  *
1246  * @dev: Device that will be interacted with
1247  * @field: regmap field which should be freed.
1248  *
1249  * Free a register field allocated using devm_regmap_field_alloc(). Usually
1250  * drivers need not call this function, as the memory allocated via devm
1251  * will be freed as part of the device-driver life-cycle.
1252  */
1253 void devm_regmap_field_free(struct device *dev,
1254         struct regmap_field *field)
1255 {
1256         devm_kfree(dev, field);
1257 }
1258 EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1259 
1260 /**
1261  * regmap_field_alloc() - Allocate and initialise a register field.
1262  *
1263  * @regmap: regmap bank in which this register field is located.
1264  * @reg_field: Register field within the bank.
1265  *
1266  * The return value will be an ERR_PTR() on error or a valid pointer
1267  * to a struct regmap_field. The regmap_field should be freed by the
1268  * user, once they have finished working with it, using regmap_field_free().
1269  */
1270 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1271                 struct reg_field reg_field)
1272 {
1273         struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1274 
1275         if (!rm_field)
1276                 return ERR_PTR(-ENOMEM);
1277 
1278         regmap_field_init(rm_field, regmap, reg_field);
1279 
1280         return rm_field;
1281 }
1282 EXPORT_SYMBOL_GPL(regmap_field_alloc);
1283 
1284 /**
1285  * regmap_field_free() - Free register field allocated using
1286  *                       regmap_field_alloc.
1287  *
1288  * @field: regmap field which should be freed.
1289  */
1290 void regmap_field_free(struct regmap_field *field)
1291 {
1292         kfree(field);
1293 }
1294 EXPORT_SYMBOL_GPL(regmap_field_free);
1295 
1296 /**
1297  * regmap_reinit_cache() - Reinitialise the current register cache
1298  *
1299  * @map: Register map to operate on.
1300  * @config: New configuration.  Only the cache data will be used.
1301  *
1302  * Discard any existing register cache for the map and initialize a
1303  * new cache.  This can be used to restore the cache to defaults or to
1304  * update the cache configuration to reflect runtime discovery of the
1305  * hardware.
1306  *
1307  * No explicit locking is done here, the user needs to ensure that
1308  * this function will not race with other calls to regmap.
1309  */
1310 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1311 {
1312         regcache_exit(map);
1313         regmap_debugfs_exit(map);
1314 
1315         map->max_register = config->max_register;
1316         map->writeable_reg = config->writeable_reg;
1317         map->readable_reg = config->readable_reg;
1318         map->volatile_reg = config->volatile_reg;
1319         map->precious_reg = config->precious_reg;
1320         map->writeable_noinc_reg = config->writeable_noinc_reg;
1321         map->readable_noinc_reg = config->readable_noinc_reg;
1322         map->cache_type = config->cache_type;
1323 
1324         regmap_debugfs_init(map, config->name);
1325 
1326         map->cache_bypass = false;
1327         map->cache_only = false;
1328 
1329         return regcache_init(map, config);
1330 }
1331 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
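
/*
 * Illustrative sketch (register name and second config are hypothetical):
 * a driver that only discovers the exact device variant at runtime might
 * re-describe the map after reading an ID register.
 *
 *      ret = regmap_read(map, FOO_REG_CHIP_REV, &rev);
 *      if (!ret && rev >= 2)
 *              ret = regmap_reinit_cache(map, &foo_v2_regmap_config);
 */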
1332 
1333 /**
1334  * regmap_exit() - Free a previously allocated register map
1335  *
1336  * @map: Register map to operate on.
1337  */
1338 void regmap_exit(struct regmap *map)
1339 {
1340         struct regmap_async *async;
1341 
1342         regcache_exit(map);
1343         regmap_debugfs_exit(map);
1344         regmap_range_exit(map);
1345         if (map->bus && map->bus->free_context)
1346                 map->bus->free_context(map->bus_context);
1347         kfree(map->work_buf);
1348         while (!list_empty(&map->async_free)) {
1349                 async = list_first_entry_or_null(&map->async_free,
1350                                                  struct regmap_async,
1351                                                  list);
1352                 list_del(&async->list);
1353                 kfree(async->work_buf);
1354                 kfree(async);
1355         }
1356         if (map->hwlock)
1357                 hwspin_lock_free(map->hwlock);
1358         kfree_const(map->name);
1359         kfree(map);
1360 }
1361 EXPORT_SYMBOL_GPL(regmap_exit);
1362 
1363 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1364 {
1365         struct regmap **r = res;
1366         if (!r || !*r) {
1367                 WARN_ON(!r || !*r);
1368                 return 0;
1369         }
1370 
1371         /* If the user didn't specify a name, match any */
1372         if (data)
1373                 return (*r)->name == data;
1374         else
1375                 return 1;
1376 }
1377 
1378 /**
1379  * dev_get_regmap() - Obtain the regmap (if any) for a device
1380  *
1381  * @dev: Device to retrieve the map for
1382  * @name: Optional name for the register map, usually NULL.
1383  *
1384  * Returns the regmap for the device if one is present, or NULL.  If
1385  * name is specified then it must match the name specified when
1386  * registering the device; if it is NULL then the first regmap found
1387  * will be used.  Devices with multiple register maps are very rare, so
1388  * generic code should normally not need to specify a name.
1389  */
1390 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1391 {
1392         struct regmap **r = devres_find(dev, dev_get_regmap_release,
1393                                         dev_get_regmap_match, (void *)name);
1394 
1395         if (!r)
1396                 return NULL;
1397         return *r;
1398 }
1399 EXPORT_SYMBOL_GPL(dev_get_regmap);
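
/*
 * Illustrative sketch: a child device of an MFD typically borrows its
 * parent's regmap rather than creating one of its own (names here are
 * hypothetical).
 *
 *      map = dev_get_regmap(pdev->dev.parent, NULL);
 *      if (!map)
 *              return -ENODEV;
 */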
1400 
1401 /**
1402  * regmap_get_device() - Obtain the device from a regmap
1403  *
1404  * @map: Register map to operate on.
1405  *
1406  * Returns the underlying device that the regmap has been created for.
1407  */
1408 struct device *regmap_get_device(struct regmap *map)
1409 {
1410         return map->dev;
1411 }
1412 EXPORT_SYMBOL_GPL(regmap_get_device);
1413 
1414 static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1415                                struct regmap_range_node *range,
1416                                unsigned int val_num)
1417 {
1418         void *orig_work_buf;
1419         unsigned int win_offset;
1420         unsigned int win_page;
1421         bool page_chg;
1422         int ret;
1423 
1424         win_offset = (*reg - range->range_min) % range->window_len;
1425         win_page = (*reg - range->range_min) / range->window_len;
1426 
1427         if (val_num > 1) {
1428                 /* Bulk write shouldn't cross range boundary */
1429                 if (*reg + val_num - 1 > range->range_max)
1430                         return -EINVAL;
1431 
1432                 /* ... or single page boundary */
1433                 if (val_num > range->window_len - win_offset)
1434                         return -EINVAL;
1435         }
1436 
1437         /* It is possible to have the selector register inside the data
1438            window.  In that case the selector register is present on every
1439            page and needs no page switching when accessed alone. */
1440         if (val_num > 1 ||
1441             range->window_start + win_offset != range->selector_reg) {
1442                 /* Use separate work_buf during page switching */
1443                 orig_work_buf = map->work_buf;
1444                 map->work_buf = map->selector_work_buf;
1445 
1446                 ret = _regmap_update_bits(map, range->selector_reg,
1447                                           range->selector_mask,
1448                                           win_page << range->selector_shift,
1449                                           &page_chg, false);
1450 
1451                 map->work_buf = orig_work_buf;
1452 
1453                 if (ret != 0)
1454                         return ret;
1455         }
1456 
1457         *reg = range->window_start + win_offset;
1458 
1459         return 0;
1460 }
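/*
 * Configuration sketch (illustrative only, not part of this file): the paging
 * handled by _regmap_select_page() is described by struct regmap_range_cfg
 * entries in the driver's regmap_config.  All register numbers below are
 * hypothetical.
 *
 *      static const struct regmap_range_cfg foo_ranges[] = {
 *              {
 *                      .name = "pages",
 *                      .range_min = 0x100,     // first indirectly addressed register
 *                      .range_max = 0x2ff,     // last indirectly addressed register
 *                      .selector_reg = 0x00,   // page select register
 *                      .selector_mask = 0x0f,
 *                      .selector_shift = 0,
 *                      .window_start = 0x10,   // data window in the direct map
 *                      .window_len = 0x80,     // registers per page
 *              },
 *      };
 *
 *      static const struct regmap_config foo_config = {
 *              .reg_bits = 16,
 *              .val_bits = 8,
 *              .max_register = 0x2ff,
 *              .ranges = foo_ranges,
 *              .num_ranges = ARRAY_SIZE(foo_ranges),
 *      };
 */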
1461 
1462 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1463                                           unsigned long mask)
1464 {
1465         u8 *buf;
1466         int i;
1467 
1468         if (!mask || !map->work_buf)
1469                 return;
1470 
1471         buf = map->work_buf;
1472 
1473         for (i = 0; i < max_bytes; i++)
1474                 buf[i] |= (mask >> (8 * i)) & 0xff;
1475 }
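/*
 * Sketch (illustrative only, not part of this file): the mask OR'd into the
 * work buffer above usually marks the bus transaction type.  For example,
 * many SPI devices expect the top bit of the register byte to be set for
 * reads; a driver describes that in its regmap_config.  The values here are
 * hypothetical.
 *
 *      static const struct regmap_config foo_spi_config = {
 *              .reg_bits = 8,
 *              .val_bits = 8,
 *              .read_flag_mask = 0x80, // OR'd into the register byte on reads
 *              .write_flag_mask = 0x00,
 *      };
 */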
1476 
1477 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1478                                   const void *val, size_t val_len)
1479 {
1480         struct regmap_range_node *range;
1481         unsigned long flags;
1482         void *work_val = map->work_buf + map->format.reg_bytes +
1483                 map->format.pad_bytes;
1484         void *buf;
1485         int ret = -ENOTSUPP;
1486         size_t len;
1487         int i;
1488 
1489         WARN_ON(!map->bus);
1490 
1491         /* Check for unwritable or noinc registers in range
1492          * before we start
1493          */
1494         if (!regmap_writeable_noinc(map, reg)) {
1495                 for (i = 0; i < val_len / map->format.val_bytes; i++) {
1496                         unsigned int element =
1497                                 reg + regmap_get_offset(map, i);
1498                         if (!regmap_writeable(map, element) ||
1499                                 regmap_writeable_noinc(map, element))
1500                                 return -EINVAL;
1501                 }
1502         }
1503 
1504         if (!map->cache_bypass && map->format.parse_val) {
1505                 unsigned int ival;
1506                 int val_bytes = map->format.val_bytes;
1507                 for (i = 0; i < val_len / val_bytes; i++) {
1508                         ival = map->format.parse_val(val + (i * val_bytes));
1509                         ret = regcache_write(map,
1510                                              reg + regmap_get_offset(map, i),
1511                                              ival);
1512                         if (ret) {
1513                                 dev_err(map->dev,
1514                                         "Error in caching of register: %x ret: %d\n",
1515                                         reg + regmap_get_offset(map, i), ret);
1516                                 return ret;
1517                         }
1518                 }
1519                 if (map->cache_only) {
1520                         map->cache_dirty = true;
1521                         return 0;
1522                 }
1523         }
1524 
1525         range = _regmap_range_lookup(map, reg);
1526         if (range) {
1527                 int val_num = val_len / map->format.val_bytes;
1528                 int win_offset = (reg - range->range_min) % range->window_len;
1529                 int win_residue = range->window_len - win_offset;
1530 
1531                 /* If the write goes beyond the end of the window split it */
1532                 while (val_num > win_residue) {
1533                         dev_dbg(map->dev, "Writing window %d/%zu\n",
1534                                 win_residue, val_len / map->format.val_bytes);
1535                         ret = _regmap_raw_write_impl(map, reg, val,
1536                                                      win_residue *
1537                                                      map->format.val_bytes);
1538                         if (ret != 0)
1539                                 return ret;
1540 
1541                         reg += win_residue;
1542                         val_num -= win_residue;
1543                         val += win_residue * map->format.val_bytes;
1544                         val_len -= win_residue * map->format.val_bytes;
1545 
1546                         win_offset = (reg - range->range_min) %
1547                                 range->window_len;
1548                         win_residue = range->window_len - win_offset;
1549                 }
1550 
1551                 ret = _regmap_select_page(map, &reg, range, val_num);
1552                 if (ret != 0)
1553                         return ret;
1554         }
1555 
1556         map->format.format_reg(map->work_buf, reg, map->reg_shift);
1557         regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1558                                       map->write_flag_mask);
1559 
1560         /*
1561          * Essentially all I/O mechanisms will be faster with a single
1562          * buffer to write.  Since register syncs often generate raw
1563          * writes of single registers optimise that case.
1564          */
1565         if (val != work_val && val_len == map->format.val_bytes) {
1566                 memcpy(work_val, val, map->format.val_bytes);
1567                 val = work_val;
1568         }
1569 
1570         if (map->async && map->bus->async_write) {
1571                 struct regmap_async *async;
1572 
1573                 trace_regmap_async_write_start(map, reg, val_len);
1574 
1575                 spin_lock_irqsave(&map->async_lock, flags);
1576                 async = list_first_entry_or_null(&map->async_free,
1577                                                  struct regmap_async,
1578                                                  list);
1579                 if (async)
1580                         list_del(&async->list);
1581                 spin_unlock_irqrestore(&map->async_lock, flags);
1582 
1583                 if (!async) {
1584                         async = map->bus->async_alloc();
1585                         if (!async)
1586                                 return -ENOMEM;
1587 
1588                         async->work_buf = kzalloc(map->format.buf_size,
1589                                                   GFP_KERNEL | GFP_DMA);
1590                         if (!async->work_buf) {
1591                                 kfree(async);
1592                                 return -ENOMEM;
1593                         }
1594                 }
1595 
1596                 async->map = map;
1597 
1598                 /* If the caller supplied the value we can use it safely. */
1599                 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1600                        map->format.reg_bytes + map->format.val_bytes);
1601 
1602                 spin_lock_irqsave(&map->async_lock, flags);
1603                 list_add_tail(&async->list, &map->async_list);
1604                 spin_unlock_irqrestore(&map->async_lock, flags);
1605 
1606                 if (val != work_val)
1607                         ret = map->bus->async_write(map->bus_context,
1608                                                     async->work_buf,
1609                                                     map->format.reg_bytes +
1610                                                     map->format.pad_bytes,
1611                                                     val, val_len, async);
1612                 else
1613                         ret = map->bus->async_write(map->bus_context,
1614                                                     async->work_buf,
1615                                                     map->format.reg_bytes +
1616                                                     map->format.pad_bytes +
1617                                                     val_len, NULL, 0, async);
1618 
1619                 if (ret != 0) {
1620                         dev_err(map->dev, "Failed to schedule write: %d\n",
1621                                 ret);
1622 
1623                         spin_lock_irqsave(&map->async_lock, flags);
1624                         list_move(&async->list, &map->async_free);
1625                         spin_unlock_irqrestore(&map->async_lock, flags);
1626                 }
1627 
1628                 return ret;
1629         }
1630 
1631         trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1632 
1633         /* If we're doing a single register write we can probably just
1634          * send the work_buf directly, otherwise try to do a gather
1635          * write.
1636          */
1637         if (val == work_val)
1638                 ret = map->bus->write(map->bus_context, map->work_buf,
1639                                       map->format.reg_bytes +
1640                                       map->format.pad_bytes +
1641                                       val_len);
1642         else if (map->bus->gather_write)
1643                 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1644                                              map->format.reg_bytes +
1645                                              map->format.pad_bytes,
1646                                              val, val_len);
1647         else
1648                 ret = -ENOTSUPP;
1649 
1650         /* If that didn't work fall back on linearising by hand. */
1651         if (ret == -ENOTSUPP) {
1652                 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1653                 buf = kzalloc(len, GFP_KERNEL);
1654                 if (!buf)
1655                         return -ENOMEM;
1656 
1657                 memcpy(buf, map->work_buf, map->format.reg_bytes);
1658                 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1659                        val, val_len);
1660                 ret = map->bus->write(map->bus_context, buf, len);
1661 
1662                 kfree(buf);
1663         } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1664                 /* regcache_drop_region() takes a lock that we already
1665                  * hold, so call map->cache_ops->drop() directly
1666                  */
1667                 if (map->cache_ops && map->cache_ops->drop)
1668                         map->cache_ops->drop(map, reg, reg + 1);
1669         }
1670 
1671         trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1672 
1673         return ret;
1674 }
1675 
1676 /**
1677  * regmap_can_raw_write - Test if regmap_raw_write() is supported
1678  *
1679  * @map: Map to check.
1680  */
1681 bool regmap_can_raw_write(struct regmap *map)
1682 {
1683         return map->bus && map->bus->write && map->format.format_val &&
1684                 map->format.format_reg;
1685 }
1686 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1687 
1688 /**
1689  * regmap_get_raw_read_max - Get the maximum size we can read
1690  *
1691  * @map: Map to check.
1692  */
1693 size_t regmap_get_raw_read_max(struct regmap *map)
1694 {
1695         return map->max_raw_read;
1696 }
1697 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1698 
1699 /**
1700  * regmap_get_raw_write_max - Get the maximum size we can write
1701  *
1702  * @map: Map to check.
1703  */
1704 size_t regmap_get_raw_write_max(struct regmap *map)
1705 {
1706         return map->max_raw_write;
1707 }
1708 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1709 
1710 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1711                                        unsigned int val)
1712 {
1713         int ret;
1714         struct regmap_range_node *range;
1715         struct regmap *map = context;
1716 
1717         WARN_ON(!map->bus || !map->format.format_write);
1718 
1719         range = _regmap_range_lookup(map, reg);
1720         if (range) {
1721                 ret = _regmap_select_page(map, &reg, range, 1);
1722                 if (ret != 0)
1723                         return ret;
1724         }
1725 
1726         map->format.format_write(map, reg, val);
1727 
1728         trace_regmap_hw_write_start(map, reg, 1);
1729 
1730         ret = map->bus->write(map->bus_context, map->work_buf,
1731                               map->format.buf_size);
1732 
1733         trace_regmap_hw_write_done(map, reg, 1);
1734 
1735         return ret;
1736 }
1737 
1738 static int _regmap_bus_reg_write(void *context, unsigned int reg,
1739                                  unsigned int val)
1740 {
1741         struct regmap *map = context;
1742 
1743         return map->bus->reg_write(map->bus_context, reg, val);
1744 }
1745 
1746 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1747                                  unsigned int val)
1748 {
1749         struct regmap *map = context;
1750 
1751         WARN_ON(!map->bus || !map->format.format_val);
1752 
1753         map->format.format_val(map->work_buf + map->format.reg_bytes
1754                                + map->format.pad_bytes, val, 0);
1755         return _regmap_raw_write_impl(map, reg,
1756                                       map->work_buf +
1757                                       map->format.reg_bytes +
1758                                       map->format.pad_bytes,
1759                                       map->format.val_bytes);
1760 }
1761 
1762 static inline void *_regmap_map_get_context(struct regmap *map)
1763 {
1764         return (map->bus) ? map : map->bus_context;
1765 }
1766 
1767 int _regmap_write(struct regmap *map, unsigned int reg,
1768                   unsigned int val)
1769 {
1770         int ret;
1771         void *context = _regmap_map_get_context(map);
1772 
1773         if (!regmap_writeable(map, reg))
1774                 return -EIO;
1775 
1776         if (!map->cache_bypass && !map->defer_caching) {
1777                 ret = regcache_write(map, reg, val);
1778                 if (ret != 0)
1779                         return ret;
1780                 if (map->cache_only) {
1781                         map->cache_dirty = true;
1782                         return 0;
1783                 }
1784         }
1785 
1786         if (regmap_should_log(map))
1787                 dev_info(map->dev, "%x <= %x\n", reg, val);
1788 
1789         trace_regmap_reg_write(map, reg, val);
1790 
1791         return map->reg_write(context, reg, val);
1792 }
1793 
1794 /**
1795  * regmap_write() - Write a value to a single register
1796  *
1797  * @map: Register map to write to
1798  * @reg: Register to write to
1799  * @val: Value to be written
1800  *
1801  * A value of zero will be returned on success, a negative errno will
1802  * be returned in error cases.
1803  */
1804 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1805 {
1806         int ret;
1807 
1808         if (!IS_ALIGNED(reg, map->reg_stride))
1809                 return -EINVAL;
1810 
1811         map->lock(map->lock_arg);
1812 
1813         ret = _regmap_write(map, reg, val);
1814 
1815         map->unlock(map->lock_arg);
1816 
1817         return ret;
1818 }
1819 EXPORT_SYMBOL_GPL(regmap_write);
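/*
 * Usage sketch (illustrative only, not part of this file): a single formatted
 * register write from a driver.  The register address and value are
 * hypothetical.
 *
 *      static int foo_enable(struct regmap *map)
 *      {
 *              int ret;
 *
 *              ret = regmap_write(map, 0x20, 0x01);
 *              if (ret)
 *                      dev_err(regmap_get_device(map),
 *                              "failed to enable: %d\n", ret);
 *              return ret;
 *      }
 */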
1820 
1821 /**
1822  * regmap_write_async() - Write a value to a single register asynchronously
1823  *
1824  * @map: Register map to write to
1825  * @reg: Register to write to
1826  * @val: Value to be written
1827  *
1828  * A value of zero will be returned on success, a negative errno will
1829  * be returned in error cases.
1830  */
1831 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1832 {
1833         int ret;
1834 
1835         if (!IS_ALIGNED(reg, map->reg_stride))
1836                 return -EINVAL;
1837 
1838         map->lock(map->lock_arg);
1839 
1840         map->async = true;
1841 
1842         ret = _regmap_write(map, reg, val);
1843 
1844         map->async = false;
1845 
1846         map->unlock(map->lock_arg);
1847 
1848         return ret;
1849 }
1850 EXPORT_SYMBOL_GPL(regmap_write_async);
1851 
1852 int _regmap_raw_write(struct regmap *map, unsigned int reg,
1853                       const void *val, size_t val_len)
1854 {
1855         size_t val_bytes = map->format.val_bytes;
1856         size_t val_count = val_len / val_bytes;
1857         size_t chunk_count, chunk_bytes;
1858         size_t chunk_regs = val_count;
1859         int ret, i;
1860 
1861         if (!val_count)
1862                 return -EINVAL;
1863 
1864         if (map->use_single_write)
1865                 chunk_regs = 1;
1866         else if (map->max_raw_write && val_len > map->max_raw_write)
1867                 chunk_regs = map->max_raw_write / val_bytes;
1868 
1869         chunk_count = val_count / chunk_regs;
1870         chunk_bytes = chunk_regs * val_bytes;
1871 
1872         /* Write as many whole chunks of chunk_bytes as possible */
1873         for (i = 0; i < chunk_count; i++) {
1874                 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
1875                 if (ret)
1876                         return ret;
1877 
1878                 reg += regmap_get_offset(map, chunk_regs);
1879                 val += chunk_bytes;
1880                 val_len -= chunk_bytes;
1881         }
1882 
1883         /* Write remaining bytes */
1884         if (val_len)
1885                 ret = _regmap_raw_write_impl(map, reg, val, val_len);
1886 
1887         return ret;
1888 }
1889 
1890 /**
1891  * regmap_raw_write() - Write raw values to one or more registers
1892  *
1893  * @map: Register map to write to
1894  * @reg: Initial register to write to
1895  * @val: Block of data to be written, laid out for direct transmission to the
1896  *       device
1897  * @val_len: Length of data pointed to by val.
1898  *
1899  * This function is intended to be used for things like firmware
1900  * download where a large block of data needs to be transferred to the
1901  * device.  No formatting will be done on the data provided.
1902  *
1903  * A value of zero will be returned on success, a negative errno will
1904  * be returned in error cases.
1905  */
1906 int regmap_raw_write(struct regmap *map, unsigned int reg,
1907                      const void *val, size_t val_len)
1908 {
1909         int ret;
1910 
1911         if (!regmap_can_raw_write(map))
1912                 return -EINVAL;
1913         if (val_len % map->format.val_bytes)
1914                 return -EINVAL;
1915 
1916         map->lock(map->lock_arg);
1917 
1918         ret = _regmap_raw_write(map, reg, val, val_len);
1919 
1920         map->unlock(map->lock_arg);
1921 
1922         return ret;
1923 }
1924 EXPORT_SYMBOL_GPL(regmap_raw_write);
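/*
 * Usage sketch (illustrative only, not part of this file): raw writes take
 * data already laid out in the device's wire format, so val_len must be a
 * multiple of the value size.  The start register and coefficients below are
 * hypothetical.
 *
 *      static int foo_load_coeffs(struct regmap *map)
 *      {
 *              // Two big-endian 16-bit values for registers 0x30 and 0x31
 *              static const u8 coeffs[] = { 0x12, 0x34, 0x56, 0x78 };
 *
 *              if (!regmap_can_raw_write(map))
 *                      return -EOPNOTSUPP;
 *
 *              return regmap_raw_write(map, 0x30, coeffs, sizeof(coeffs));
 *      }
 */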
1925 
1926 /**
1927  * regmap_noinc_write(): Write data to a register without incrementing the
1928  *                      register number
1929  *
1930  * @map: Register map to write to
1931  * @reg: Register to write to
1932  * @val: Pointer to data buffer
1933  * @val_len: Length of the data to be written, in bytes.
1934  *
1935  * The regmap API usually assumes that bulk bus write operations will write a
1936  * range of registers. Some devices have certain registers for which a write
1937  * operation can write to an internal FIFO.
1938  *
1939  * The target register must be volatile but registers after it can be
1940  * completely unrelated cacheable registers.
1941  *
1942  * This will attempt multiple writes as required to write val_len bytes.
1943  *
1944  * A value of zero will be returned on success, a negative errno will be
1945  * returned in error cases.
1946  */
1947 int regmap_noinc_write(struct regmap *map, unsigned int reg,
1948                       const void *val, size_t val_len)
1949 {
1950         size_t write_len;
1951         int ret;
1952 
1953         if (!map->bus)
1954                 return -EINVAL;
1955         if (!map->bus->write)
1956                 return -ENOTSUPP;
1957         if (val_len % map->format.val_bytes)
1958                 return -EINVAL;
1959         if (!IS_ALIGNED(reg, map->reg_stride))
1960                 return -EINVAL;
1961         if (val_len == 0)
1962                 return -EINVAL;
1963 
1964         map->lock(map->lock_arg);
1965 
1966         if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
1967                 ret = -EINVAL;
1968                 goto out_unlock;
1969         }
1970 
1971         while (val_len) {
1972                 if (map->max_raw_write && map->max_raw_write < val_len)
1973                         write_len = map->max_raw_write;
1974                 else
1975                         write_len = val_len;
1976                 ret = _regmap_raw_write(map, reg, val, write_len);
1977                 if (ret)
1978                         goto out_unlock;
1979                 val = ((u8 *)val) + write_len;
1980                 val_len -= write_len;
1981         }
1982 
1983 out_unlock:
1984         map->unlock(map->lock_arg);
1985         return ret;
1986 }
1987 EXPORT_SYMBOL_GPL(regmap_noinc_write);
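/*
 * Usage sketch (illustrative only, not part of this file): pushing a block of
 * data into a device FIFO that is exposed through a single register.  The
 * FIFO register must be reported as volatile and writeable_noinc by the
 * regmap_config callbacks; the register number is hypothetical.
 *
 *      static int foo_fifo_send(struct regmap *map, const void *buf, size_t len)
 *      {
 *              return regmap_noinc_write(map, 0x40, buf, len);
 *      }
 */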
1988 
1989 /**
1990  * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
1991  *                                   register field.
1992  *
1993  * @field: Register field to write to
1994  * @mask: Bitmask to change
1995  * @val: Value to be written
1996  * @change: Boolean indicating if a write was done
1997  * @async: Boolean indicating if the write should be done asynchronously
1998  * @force: Boolean indicating whether to force the write even if unchanged
1999  *
2000  * Perform a read/modify/write cycle on the register field with change,
2001  * async, force options.
2002  *
2003  * A value of zero will be returned on success, a negative errno will
2004  * be returned in error cases.
2005  */
2006 int regmap_field_update_bits_base(struct regmap_field *field,
2007                                   unsigned int mask, unsigned int val,
2008                                   bool *change, bool async, bool force)
2009 {
2010         mask = (mask << field->shift) & field->mask;
2011 
2012         return regmap_update_bits_base(field->regmap, field->reg,
2013                                        mask, val << field->shift,
2014                                        change, async, force);
2015 }
2016 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
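/*
 * Usage sketch (illustrative only, not part of this file): the regmap_field
 * helpers such as regmap_field_write() and regmap_field_read() are thin
 * wrappers around regmap_field_update_bits_base().  The register layout
 * below is hypothetical.
 *
 *      static const struct reg_field foo_pwr_field = REG_FIELD(0x02, 4, 6);
 *
 *      static int foo_set_power(struct device *dev, struct regmap *map,
 *                               unsigned int level)
 *      {
 *              struct regmap_field *f;
 *
 *              f = devm_regmap_field_alloc(dev, map, foo_pwr_field);
 *              if (IS_ERR(f))
 *                      return PTR_ERR(f);
 *
 *              // Read/modify/write bits [6:4] of register 0x02
 *              return regmap_field_write(f, level);
 *      }
 */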
2017 
2018 /**
2019  * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
2020  *                                    register field with port ID
2021  *
2022  * @field: Register field to write to
2023  * @id: port ID
2024  * @mask: Bitmask to change
2025  * @val: Value to be written
2026  * @change: Boolean indicating if a write was done
2027  * @async: Boolean indicating if the write should be done asynchronously
2028  * @force: Boolean indicating whether to force the write even if unchanged
2029  *
2030  * A value of zero will be returned on success, a negative errno will
2031  * be returned in error cases.
2032  */
2033 int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
2034                                    unsigned int mask, unsigned int val,
2035                                    bool *change, bool async, bool force)
2036 {
2037         if (id >= field->id_size)
2038                 return -EINVAL;
2039 
2040         mask = (mask << field->shift) & field->mask;
2041 
2042         return regmap_update_bits_base(field->regmap,
2043                                        field->reg + (field->id_offset * id),
2044                                        mask, val << field->shift,
2045                                        change, async, force);
2046 }
2047 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
2048 
2049 /**
2050  * regmap_bulk_write() - Write multiple registers to the device
2051  *
2052  * @map: Register map to write to
2053  * @reg: First register to write to
2054  * @val: Block of data to be written, in native register size for device
2055  * @val_count: Number of registers to write
2056  *
2057  * This function is intended to be used for writing a large block of
2058  * data to the device either in a single transfer or in multiple transfers.
2059  *
2060  * A value of zero will be returned on success, a negative errno will
2061  * be returned in error cases.
2062  */
2063 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
2064                      size_t val_count)
2065 {
2066         int ret = 0, i;
2067         size_t val_bytes = map->format.val_bytes;
2068 
2069         if (!IS_ALIGNED(reg, map->reg_stride))
2070                 return -EINVAL;
2071 
2072         /*
2073          * Some devices don't support bulk write; for them we fall back to a
2074          * series of single write operations.
2075          */
2076         if (!map->bus || !map->format.parse_inplace) {
2077                 map->lock(map->lock_arg);
2078                 for (i = 0; i < val_count; i++) {
2079                         unsigned int ival;
2080 
2081                         switch (val_bytes) {
2082                         case 1:
2083                                 ival = *(u8 *)(val + (i * val_bytes));
2084                                 break;
2085                         case 2:
2086                                 ival = *(u16 *)(val + (i * val_bytes));
2087                                 break;
2088                         case 4:
2089                                 ival = *(u32 *)(val + (i * val_bytes));
2090                                 break;
2091 #ifdef CONFIG_64BIT
2092                         case 8:
2093                                 ival = *(u64 *)(val + (i * val_bytes));
2094                                 break;
2095 #endif
2096                         default:
2097                                 ret = -EINVAL;
2098                                 goto out;
2099                         }
2100 
2101                         ret = _regmap_write(map,
2102                                             reg + regmap_get_offset(map, i),
2103                                             ival);
2104                         if (ret != 0)
2105                                 goto out;
2106                 }
2107 out:
2108                 map->unlock(map->lock_arg);
2109         } else {
2110                 void *wval;
2111 
2112                 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
2113                 if (!wval)
2114                         return -ENOMEM;
2115 
2116                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2117                         map->format.parse_inplace(wval + i);
2118 
2119                 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2120 
2121                 kfree(wval);
2122         }
2123         return ret;
2124 }
2125 EXPORT_SYMBOL_GPL(regmap_bulk_write);
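/*
 * Usage sketch (illustrative only, not part of this file): bulk writes take
 * values in the device's native register size (16 bits is assumed here) and
 * CPU byte order; the core formats them or falls back to single writes.
 * Registers and values are hypothetical.
 *
 *      static int foo_write_thresholds(struct regmap *map)
 *      {
 *              static const u16 thresholds[] = { 100, 200, 300, 400 };
 *
 *              return regmap_bulk_write(map, 0x50, thresholds,
 *                                       ARRAY_SIZE(thresholds));
 *      }
 */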
2126 
2127 /*
2128  * _regmap_raw_multi_reg_write()
2129  *
2130  * the (register,newvalue) pairs in regs have not been formatted, but
2131  * they are all in the same page and have been changed to being page
2132  * relative. The page register has been written if that was necessary.
2133  */
2134 static int _regmap_raw_multi_reg_write(struct regmap *map,
2135                                        const struct reg_sequence *regs,
2136                                        size_t num_regs)
2137 {
2138         int ret;
2139         void *buf;
2140         int i;
2141         u8 *u8;
2142         size_t val_bytes = map->format.val_bytes;
2143         size_t reg_bytes = map->format.reg_bytes;
2144         size_t pad_bytes = map->format.pad_bytes;
2145         size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2146         size_t len = pair_size * num_regs;
2147 
2148         if (!len)
2149                 return -EINVAL;
2150 
2151         buf = kzalloc(len, GFP_KERNEL);
2152         if (!buf)
2153                 return -ENOMEM;
2154 
2155         /* We have to linearise by hand. */
2156 
2157         u8 = buf;
2158 
2159         for (i = 0; i < num_regs; i++) {
2160                 unsigned int reg = regs[i].reg;
2161                 unsigned int val = regs[i].def;
2162                 trace_regmap_hw_write_start(map, reg, 1);
2163                 map->format.format_reg(u8, reg, map->reg_shift);
2164                 u8 += reg_bytes + pad_bytes;
2165                 map->format.format_val(u8, val, 0);
2166                 u8 += val_bytes;
2167         }
2168         u8 = buf;
2169         *u8 |= map->write_flag_mask;
2170 
2171         ret = map->bus->write(map->bus_context, buf, len);
2172 
2173         kfree(buf);
2174 
2175         for (i = 0; i < num_regs; i++) {
2176                 int reg = regs[i].reg;
2177                 trace_regmap_hw_write_done(map, reg, 1);
2178         }
2179         return ret;
2180 }
2181 
2182 static unsigned int _regmap_register_page(struct regmap *map,
2183                                           unsigned int reg,
2184                                           struct regmap_range_node *range)
2185 {
2186         unsigned int win_page = (reg - range->range_min) / range->window_len;
2187 
2188         return win_page;
2189 }
2190 
2191 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
2192                                                struct reg_sequence *regs,
2193                                                size_t num_regs)
2194 {
2195         int ret;
2196         int i, n;
2197         struct reg_sequence *base;
2198         unsigned int this_page = 0;
2199         unsigned int page_change = 0;
2200         /*
2201          * The set of registers is not necessarily in order, but
2202          * since the order of writes must be preserved this algorithm
2203          * chops the set each time the page changes. This also applies
2204          * if a delay is required at any point in the sequence.
2205          */
2206         base = regs;
2207         for (i = 0, n = 0; i < num_regs; i++, n++) {
2208                 unsigned int reg = regs[i].reg;
2209                 struct regmap_range_node *range;
2210 
2211                 range = _regmap_range_lookup(map, reg);
2212                 if (range) {
2213                         unsigned int win_page = _regmap_register_page(map, reg,
2214                                                                       range);
2215 
2216                         if (i == 0)
2217                                 this_page = win_page;
2218                         if (win_page != this_page) {
2219                                 this_page = win_page;
2220                                 page_change = 1;
2221                         }
2222                 }
2223 
2224                 /* If we have both a page change and a delay, make sure to
2225                  * write the regs and apply the delay before we change the
2226                  * page.
2227                  */
2228 
2229                 if (page_change || regs[i].delay_us) {
2230 
2231                         /* For situations where the first write requires
2232                          * a delay we need to make sure we don't call
2233                          * raw_multi_reg_write with n=0.
2234                          * This can't occur with page breaks as we
2235                          * never write on the first iteration.
2236                          */
2237                         if (regs[i].delay_us && i == 0)
2238                                 n = 1;
2239 
2240                         ret = _regmap_raw_multi_reg_write(map, base, n);
2241                         if (ret != 0)
2242                                 return ret;
2243 
2244                         if (regs[i].delay_us)
2245                                 udelay(regs[i].delay_us);
2246 
2247                         base += n;
2248                         n = 0;
2249 
2250                         if (page_change) {
2251                                 ret = _regmap_select_page(map,
2252                                                           &base[n].reg,
2253                                                           range, 1);
2254                                 if (ret != 0)
2255                                         return ret;
2256 
2257                                 page_change = 0;
2258                         }
2259 
2260                 }
2261 
2262         }
2263         if (n > 0)
2264                 return _regmap_raw_multi_reg_write(map, base, n);
2265         return 0;
2266 }
2267 
2268 static int _regmap_multi_reg_write(struct regmap *map,
2269                                    const struct reg_sequence *regs,
2270                                    size_t num_regs)
2271 {
2272         int i;
2273         int ret;
2274 
2275         if (!map->can_multi_write) {
2276                 for (i = 0; i < num_regs; i++) {
2277                         ret = _regmap_write(map, regs[i].reg, regs[i].def);
2278                         if (ret != 0)
2279                                 return ret;
2280 
2281                         if (regs[i].delay_us)
2282                                 udelay(regs[i].delay_us);
2283                 }
2284                 return 0;
2285         }
2286 
2287         if (!map->format.parse_inplace)
2288                 return -EINVAL;
2289 
2290         if (map->writeable_reg)
2291                 for (i = 0; i < num_regs; i++) {
2292                         int reg = regs[i].reg;
2293                         if (!map->writeable_reg(map->dev, reg))
2294                                 return -EINVAL;
2295                         if (!IS_ALIGNED(reg, map->reg_stride))
2296                                 return -EINVAL;
2297                 }
2298 
2299         if (!map->cache_bypass) {
2300                 for (i = 0; i < num_regs; i++) {
2301                         unsigned int val = regs[i].def;
2302                         unsigned int reg = regs[i].reg;
2303                         ret = regcache_write(map, reg, val);
2304                         if (ret) {
2305                                 dev_err(map->dev,
2306                                 "Error in caching of register: %x ret: %d\n",
2307                                                                 reg, ret);
2308                                 return ret;
2309                         }
2310                 }
2311                 if (map->cache_only) {
2312                         map->cache_dirty = true;
2313                         return 0;
2314                 }
2315         }
2316 
2317         WARN_ON(!map->bus);
2318 
2319         for (i = 0; i < num_regs; i++) {
2320                 unsigned int reg = regs[i].reg;
2321                 struct regmap_range_node *range;
2322 
2323                 /* Coalesce all the writes between a page break or a delay
2324                  * in a sequence
2325                  */
2326                 range = _regmap_range_lookup(map, reg);
2327                 if (range || regs[i].delay_us) {
2328                         size_t len = sizeof(struct reg_sequence)*num_regs;
2329                         struct reg_sequence *base = kmemdup(regs, len,
2330                                                            GFP_KERNEL);
2331                         if (!base)
2332                                 return -ENOMEM;
2333                         ret = _regmap_range_multi_paged_reg_write(map, base,
2334                                                                   num_regs);
2335                         kfree(base);
2336 
2337                         return ret;
2338                 }
2339         }
2340         return _regmap_raw_multi_reg_write(map, regs, num_regs);
2341 }
2342 
2343 /**
2344  * regmap_multi_reg_write() - Write multiple registers to the device
2345  *
2346  * @map: Register map to write to
2347  * @regs: Array of structures containing register,value to be written
2348  * @num_regs: Number of registers to write
2349  *
2350  * Write multiple registers to the device where the set of register, value
2351  * pairs are supplied in any order, possibly not all in a single range.
2352  *
2353  * The 'normal' block write mode will ultimately send data on the
2354  * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2355  * addressed. However, this alternative block multi write mode will send
2356  * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2357  * must of course support the mode.
2358  *
2359  * A value of zero will be returned on success, a negative errno will be
2360  * returned in error cases.
2361  */
2362 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2363                            int num_regs)
2364 {
2365         int ret;
2366 
2367         map->lock(map->lock_arg);
2368 
2369         ret = _regmap_multi_reg_write(map, regs, num_regs);
2370 
2371         map->unlock(map->lock_arg);
2372 
2373         return ret;
2374 }
2375 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
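/*
 * Usage sketch (illustrative only, not part of this file): a reg_sequence
 * lets unrelated registers be written in order, optionally with a delay
 * after an entry.  The sequence below is hypothetical.
 *
 *      static const struct reg_sequence foo_init_seq[] = {
 *              { .reg = 0x00, .def = 0x01, .delay_us = 100 },  // soft reset
 *              { .reg = 0x10, .def = 0x80 },
 *              { .reg = 0x2f, .def = 0x3c },
 *      };
 *
 *      static int foo_hw_init(struct regmap *map)
 *      {
 *              return regmap_multi_reg_write(map, foo_init_seq,
 *                                            ARRAY_SIZE(foo_init_seq));
 *      }
 */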
2376 
2377 /**
2378  * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2379  *                                     device but not the cache
2380  *
2381  * @map: Register map to write to
2382  * @regs: Array of structures containing register,value to be written
2383  * @num_regs: Number of registers to write
2384  *
2385  * Write multiple registers to the device but not the cache, where the set
2386  * of register, value pairs are supplied in any order.
2387  *
2388  * This function is intended to be used for writing a large block of data
2389  * atomically to the device in single transfer for those I2C client devices
2390  * that implement this alternative block write mode.
2391  *
2392  * A value of zero will be returned on success, a negative errno will
2393  * be returned in error cases.
2394  */
2395 int regmap_multi_reg_write_bypassed(struct regmap *map,
2396                                     const struct reg_sequence *regs,
2397                                     int num_regs)
2398 {
2399         int ret;
2400         bool bypass;
2401 
2402         map->lock(map->lock_arg);
2403 
2404         bypass = map->cache_bypass;
2405         map->cache_bypass = true;
2406 
2407         ret = _regmap_multi_reg_write(map, regs, num_regs);
2408 
2409         map->cache_bypass = bypass;
2410 
2411         map->unlock(map->lock_arg);
2412 
2413         return ret;
2414 }
2415 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2416 
2417 /**
2418  * regmap_raw_write_async() - Write raw values to one or more registers
2419  *                            asynchronously
2420  *
2421  * @map: Register map to write to
2422  * @reg: Initial register to write to
2423  * @val: Block of data to be written, laid out for direct transmission to the
2424  *       device.  Must be valid until regmap_async_complete() is called.
2425  * @val_len: Length of data pointed to by val.
2426  *
2427  * This function is intended to be used for things like firmware
2428  * download where a large block of data needs to be transferred to the
2429  * device.  No formatting will be done on the data provided.
2430  *
2431  * If supported by the underlying bus the write will be scheduled
2432  * asynchronously, helping maximise I/O speed on higher speed buses
2433  * like SPI.  regmap_async_complete() can be called to ensure that all
2434  * asynchronous writes have been completed.
2435  *
2436  * A value of zero will be returned on success, a negative errno will
2437  * be returned in error cases.
2438  */
2439 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2440                            const void *val, size_t val_len)
2441 {
2442         int ret;
2443 
2444         if (val_len % map->format.val_bytes)
2445                 return -EINVAL;
2446         if (!IS_ALIGNED(reg, map->reg_stride))
2447                 return -EINVAL;
2448 
2449         map->lock(map->lock_arg);
2450 
2451         map->async = true;
2452 
2453         ret = _regmap_raw_write(map, reg, val, val_len);
2454 
2455         map->async = false;
2456 
2457         map->unlock(map->lock_arg);
2458 
2459         return ret;
2460 }
2461 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
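/*
 * Usage sketch (illustrative only, not part of this file): scheduling a large
 * download asynchronously and then waiting for it to finish.  The firmware
 * buffer must stay valid until regmap_async_complete() returns; the start
 * register is hypothetical.
 *
 *      static int foo_download_fw(struct regmap *map, const u8 *fw, size_t len)
 *      {
 *              int ret;
 *
 *              ret = regmap_raw_write_async(map, 0x1000, fw, len);
 *              if (ret)
 *                      return ret;
 *
 *              // Block until the bus has completed all queued writes
 *              return regmap_async_complete(map);
 *      }
 */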
2462 
2463 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2464                             unsigned int val_len)
2465 {
2466         struct regmap_range_node *range;
2467         int ret;
2468 
2469         WARN_ON(!map->bus);
2470 
2471         if (!map->bus || !map->bus->read)
2472                 return -EINVAL;
2473 
2474         range = _regmap_range_lookup(map, reg);
2475         if (range) {
2476                 ret = _regmap_select_page(map, &reg, range,
2477                                           val_len / map->format.val_bytes);
2478                 if (ret != 0)
2479                         return ret;
2480         }
2481 
2482         map->format.format_reg(map->work_buf, reg, map->reg_shift);
2483         regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
2484                                       map->read_flag_mask);
2485         trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2486 
2487         ret = map->bus->read(map->bus_context, map->work_buf,
2488                              map->format.reg_bytes + map->format.pad_bytes,
2489                              val, val_len);
2490 
2491         trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2492 
2493         return ret;
2494 }
2495 
2496 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2497                                 unsigned int *val)
2498 {
2499         struct regmap *map = context;
2500 
2501         return map->bus->reg_read(map->bus_context, reg, val);
2502 }
2503 
2504 static int _regmap_bus_read(void *context, unsigned int reg,
2505                             unsigned int *val)
2506 {
2507         int ret;
2508         struct regmap *map = context;
2509         void *work_val = map->work_buf + map->format.reg_bytes +
2510                 map->format.pad_bytes;
2511 
2512         if (!map->format.parse_val)
2513                 return -EINVAL;
2514 
2515         ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
2516         if (ret == 0)
2517                 *val = map->format.parse_val(work_val);
2518 
2519         return ret;
2520 }
2521 
2522 static int _regmap_read(struct regmap *map, unsigned int reg,
2523                         unsigned int *val)
2524 {
2525         int ret;
2526         void *context = _regmap_map_get_context(map);
2527 
2528         if (!map->cache_bypass) {
2529                 ret = regcache_read(map, reg, val);
2530                 if (ret == 0)
2531                         return 0;
2532         }
2533 
2534         if (map->cache_only)
2535                 return -EBUSY;
2536 
2537         if (!regmap_readable(map, reg))
2538                 return -EIO;
2539 
2540         ret = map->reg_read(context, reg, val);
2541         if (ret == 0) {
2542                 if (regmap_should_log(map))
2543                         dev_info(map->dev, "%x => %x\n", reg, *val);
2544 
2545                 trace_regmap_reg_read(map, reg, *val);
2546 
2547                 if (!map->cache_bypass)
2548                         regcache_write(map, reg, *val);
2549         }
2550 
2551         return ret;
2552 }
2553 
2554 /**
2555  * regmap_read() - Read a value from a single register
2556  *
2557  * @map: Register map to read from
2558  * @reg: Register to be read from
2559  * @val: Pointer to store read value
2560  *
2561  * A value of zero will be returned on success, a negative errno will
2562  * be returned in error cases.
2563  */
2564 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2565 {
2566         int ret;
2567 
2568         if (!IS_ALIGNED(reg, map->reg_stride))
2569                 return -EINVAL;
2570 
2571         map->lock(map->lock_arg);
2572 
2573         ret = _regmap_read(map, reg, val);
2574 
2575         map->unlock(map->lock_arg);
2576 
2577         return ret;
2578 }
2579 EXPORT_SYMBOL_GPL(regmap_read);
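/*
 * Usage sketch (illustrative only, not part of this file): reading a single
 * register, typically to poll a status bit.  The register and bit position
 * are hypothetical.
 *
 *      static int foo_is_ready(struct regmap *map)
 *      {
 *              unsigned int status;
 *              int ret;
 *
 *              ret = regmap_read(map, 0x03, &status);
 *              if (ret)
 *                      return ret;
 *
 *              return !!(status & BIT(0));
 *      }
 */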
2580 
2581 /**
2582  * regmap_raw_read() - Read raw data from the device
2583  *
2584  * @map: Register map to read from
2585  * @reg: First register to be read from
2586  * @val: Pointer to store read value
2587  * @val_len: Size of data to read
2588  *
2589  * A value of zero will be returned on success, a negative errno will
2590  * be returned in error cases.
2591  */
2592 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2593                     size_t val_len)
2594 {
2595         size_t val_bytes = map->format.val_bytes;
2596         size_t val_count = val_len / val_bytes;
2597         unsigned int v;
2598         int ret, i;
2599 
2600         if (!map->bus)
2601                 return -EINVAL;
2602         if (val_len % map->format.val_bytes)
2603                 return -EINVAL;
2604         if (!IS_ALIGNED(reg, map->reg_stride))
2605                 return -EINVAL;
2606         if (val_count == 0)
2607                 return -EINVAL;
2608 
2609         map->lock(map->lock_arg);
2610 
2611         if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2612             map->cache_type == REGCACHE_NONE) {
2613                 size_t chunk_count, chunk_bytes;
2614                 size_t chunk_regs = val_count;
2615 
2616                 if (!map->bus->read) {
2617                         ret = -ENOTSUPP;
2618                         goto out;
2619                 }
2620 
2621                 if (map->use_single_read)
2622                         chunk_regs = 1;
2623                 else if (map->max_raw_read && val_len > map->max_raw_read)
2624                         chunk_regs = map->max_raw_read / val_bytes;
2625 
2626                 chunk_count = val_count / chunk_regs;
2627                 chunk_bytes = chunk_regs * val_bytes;
2628 
2629                 /* Read bytes that fit into whole chunks */
2630                 for (i = 0; i < chunk_count; i++) {
2631                         ret = _regmap_raw_read(map, reg, val, chunk_bytes);
2632                         if (ret != 0)
2633                                 goto out;
2634 
2635                         reg += regmap_get_offset(map, chunk_regs);
2636                         val += chunk_bytes;
2637                         val_len -= chunk_bytes;
2638                 }
2639 
2640                 /* Read remaining bytes */
2641                 if (val_len) {
2642                         ret = _regmap_raw_read(map, reg, val, val_len);
2643                         if (ret != 0)
2644                                 goto out;
2645                 }
2646         } else {
2647                 /* Otherwise go word by word for the cache; should be low
2648                  * cost as we expect to hit the cache.
2649                  */
2650                 for (i = 0; i < val_count; i++) {
2651                         ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2652                                            &v);
2653                         if (ret != 0)
2654                                 goto out;
2655 
2656                         map->format.format_val(val + (i * val_bytes), v, 0);
2657                 }
2658         }
2659 
2660  out:
2661         map->unlock(map->lock_arg);
2662 
2663         return ret;
2664 }
2665 EXPORT_SYMBOL_GPL(regmap_raw_read);
2666 
2667 /**
2668  * regmap_noinc_read(): Read data from a register without incrementing the
2669  *                      register number
2670  *
2671  * @map: Register map to read from
2672  * @reg: Register to read from
2673  * @val: Pointer to data buffer
2674  * @val_len: Length of output buffer in bytes.
2675  *
2676  * The regmap API usually assumes that bulk bus read operations will read a
2677  * range of registers. Some devices have certain registers for which a read
2678  * operation will read from an internal FIFO.
2679  *
2680  * The target register must be volatile but registers after it can be
2681  * completely unrelated cacheable registers.
2682  *
2683  * This will attempt multiple reads as required to read val_len bytes.
2684  *
2685  * A value of zero will be returned on success, a negative errno will be
2686  * returned in error cases.
2687  */
2688 int regmap_noinc_read(struct regmap *map, unsigned int reg,
2689                       void *val, size_t val_len)
2690 {
2691         size_t read_len;
2692         int ret;
2693 
2694         if (!map->bus)
2695                 return -EINVAL;
2696         if (!map->bus->read)
2697                 return -ENOTSUPP;
2698         if (val_len % map->format.val_bytes)
2699                 return -EINVAL;
2700         if (!IS_ALIGNED(reg, map->reg_stride))
2701                 return -EINVAL;
2702         if (val_len == 0)
2703                 return -EINVAL;
2704 
2705         map->lock(map->lock_arg);
2706 
2707         if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
2708                 ret = -EINVAL;
2709                 goto out_unlock;
2710         }
2711 
2712         while (val_len) {
2713                 if (map->max_raw_read && map->max_raw_read < val_len)
2714                         read_len = map->max_raw_read;
2715                 else
2716                         read_len = val_len;
2717                 ret = _regmap_raw_read(map, reg, val, read_len);
2718                 if (ret)
2719                         goto out_unlock;
2720                 val = ((u8 *)val) + read_len;
2721                 val_len -= read_len;
2722         }
2723 
2724 out_unlock:
2725         map->unlock(map->lock_arg);
2726         return ret;
2727 }
2728 EXPORT_SYMBOL_GPL(regmap_noinc_read);
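/*
 * Usage sketch (illustrative only, not part of this file): draining a device
 * FIFO exposed through a single register that the regmap_config reports as
 * volatile and readable_noinc.  The register number is hypothetical.
 *
 *      static int foo_fifo_recv(struct regmap *map, void *buf, size_t len)
 *      {
 *              return regmap_noinc_read(map, 0x41, buf, len);
 *      }
 */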
2729 
2730 /**
2731  * regmap_field_read(): Read a value from a single register field
2732  *
2733  * @field: Register field to read from
2734  * @val: Pointer to store read value
2735  *
2736  * A value of zero will be returned on success, a negative errno will
2737  * be returned in error cases.
2738  */
2739 int regmap_field_read(struct regmap_field *field, unsigned int *val)
2740 {
2741         int ret;
2742         unsigned int reg_val;
2743         ret = regmap_read(field->regmap, field->reg, &reg_val);
2744         if (ret != 0)
2745                 return ret;
2746 
2747         reg_val &= field->mask;
2748         reg_val >>= field->shift;
2749         *val = reg_val;
2750 
2751         return ret;
2752 }
2753 EXPORT_SYMBOL_GPL(regmap_field_read);
2754 
2755 /**
2756  * regmap_fields_read() - Read a value from a single register field with port ID
2757  *
2758  * @field: Register field to read from
2759  * @id: port ID
2760  * @val: Pointer to store read value
2761  *
2762  * A value of zero will be returned on success, a negative errno will
2763  * be returned in error cases.
2764  */
2765 int regmap_fields_read(struct regmap_field *field, unsigned int id,
2766                        unsigned int *val)
2767 {
2768         int ret;
2769         unsigned int reg_val;
2770 
2771         if (id >= field->id_size)
2772                 return -EINVAL;
2773 
2774         ret = regmap_read(field->regmap,
2775                           field->reg + (field->id_offset * id),
2776                           &reg_val);
2777         if (ret != 0)
2778                 return ret;
2779 
2780         reg_val &= field->mask;
2781         reg_val >>= field->shift;
2782         *val = reg_val;
2783 
2784         return ret;
2785 }
2786 EXPORT_SYMBOL_GPL(regmap_fields_read);
2787 
2788 /**
2789  * regmap_bulk_read() - Read multiple registers from the device
2790  *
2791  * @map: Register map to read from
2792  * @reg: First register to be read from
2793  * @val: Pointer to store read value, in native register size for device
2794  * @val_count: Number of registers to read
2795  *
2796  * A value of zero will be returned on success, a negative errno will
2797  * be returned in error cases.
2798  */
2799 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2800                      size_t val_count)
2801 {
2802         int ret, i;
2803         size_t val_bytes = map->format.val_bytes;
2804         bool vol = regmap_volatile_range(map, reg, val_count);
2805 
2806         if (!IS_ALIGNED(reg, map->reg_stride))
2807                 return -EINVAL;
2808         if (val_count == 0)
2809                 return -EINVAL;
2810 
2811         if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2812                 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
2813                 if (ret != 0)
2814                         return ret;
2815 
2816                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2817                         map->format.parse_inplace(val + i);
2818         } else {
2819 #ifdef CONFIG_64BIT
2820                 u64 *u64 = val;
2821 #endif
2822                 u32 *u32 = val;
2823                 u16 *u16 = val;
2824                 u8 *u8 = val;
2825 
2826                 map->lock(map->lock_arg);
2827 
2828                 for (i = 0; i < val_count; i++) {
2829                         unsigned int ival;
2830 
2831                         ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2832                                            &ival);
2833                         if (ret != 0)
2834                                 goto out;
2835 
2836                         switch (map->format.val_bytes) {
2837 #ifdef CONFIG_64BIT
2838                         case 8:
2839                                 u64[i] = ival;
2840                                 break;
2841 #endif
2842                         case 4:
2843                                 u32[i] = ival;
2844                                 break;
2845                         case 2:
2846                                 u16[i] = ival;
2847                                 break;
2848                         case 1:
2849                                 u8[i] = ival;
2850                                 break;
2851                         default:
2852                                 ret = -EINVAL;
2853                                 goto out;
2854                         }
2855                 }
2856 
2857 out:
2858                 map->unlock(map->lock_arg);
2859         }
2860 
2861         return ret;
2862 }
2863 EXPORT_SYMBOL_GPL(regmap_bulk_read);
2864 
2865 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2866                                unsigned int mask, unsigned int val,
2867                                bool *change, bool force_write)
2868 {
2869         int ret;
2870         unsigned int tmp, orig;
2871 
2872         if (change)
2873                 *change = false;
2874 
2875         if (regmap_volatile(map, reg) && map->reg_update_bits) {
2876                 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
2877                 if (ret == 0 && change)
2878                         *change = true;
2879         } else {
2880                 ret = _regmap_read(map, reg, &orig);
2881                 if (ret != 0)
2882                         return ret;
2883 
2884                 tmp = orig & ~mask;
2885                 tmp |= val & mask;
2886 
2887                 if (force_write || (tmp != orig)) {
2888                         ret = _regmap_write(map, reg, tmp);
2889                         if (ret == 0 && change)
2890                                 *change = true;
2891                 }
2892         }
2893 
2894         return ret;
2895 }
2896 
2897 /**
2898  * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
2899  *
2900  * @map: Register map to update
2901  * @reg: Register to update
2902  * @mask: Bitmask to change
2903  * @val: New value for bitmask
2904  * @change: Boolean indicating if a write was done
2905  * @async: Boolean indicating if the write should be done asynchronously
2906  * @force: Boolean indicating whether to force the write even if unchanged
2907  *
2908  * Perform a read/modify/write cycle on a register map with change, async, force
2909  * options.
2910  *
2911  * If async is true:
2912  *
2913  * With most buses the read must be done synchronously so this is most useful
2914  * for devices with a cache which do not need to interact with the hardware to
2915  * determine the current register value.
2916  *
2917  * Returns zero for success, a negative number on error.
2918  */
2919 int regmap_update_bits_base(struct regmap *map, unsigned int reg,
2920                             unsigned int mask, unsigned int val,
2921                             bool *change, bool async, bool force)
2922 {
2923         int ret;
2924 
2925         map->lock(map->lock_arg);
2926 
2927         map->async = async;
2928 
2929         ret = _regmap_update_bits(map, reg, mask, val, change, force);
2930 
2931         map->async = false;
2932 
2933         map->unlock(map->lock_arg);
2934 
2935         return ret;
2936 }
2937 EXPORT_SYMBOL_GPL(regmap_update_bits_base);
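/*
 * Usage sketch (illustrative only, not part of this file): the common
 * regmap_update_bits() helper is regmap_update_bits_base() with the change,
 * async and force options all disabled.  Register, mask and value are
 * hypothetical.
 *
 *      static int foo_set_gain(struct regmap *map, unsigned int gain)
 *      {
 *              // Replace bits [3:0] of register 0x07 with the new gain
 *              return regmap_update_bits(map, 0x07, 0x0f, gain & 0x0f);
 *      }
 */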
2938 
2939 void regmap_async_complete_cb(struct regmap_async *async, int ret)
2940 {
2941         struct regmap *map = async->map;
2942         bool wake;
2943 
2944         trace_regmap_async_io_complete(map);
2945 
2946         spin_lock(&map->async_lock);
2947         list_move(&async->list, &map->async_free);
2948         wake = list_empty(&map->async_list);
2949 
2950         if (ret != 0)
2951                 map->async_ret = ret;
2952 
2953         spin_unlock(&map->async_lock);
2954 
2955         if (wake)
2956                 wake_up(&map->async_waitq);
2957 }
2958 EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
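
/*
 * Illustrative sketch (not part of this file): a bus implementation is
 * expected to call regmap_async_complete_cb() from its transfer
 * completion handler, loosely following the pattern used by
 * drivers/base/regmap/regmap-spi.c.  The wrapper struct and field
 * names here are hypothetical.
 *
 *	struct foo_bus_async {
 *		struct regmap_async core;
 *		int status;
 *	};
 *
 *	static void foo_bus_xfer_done(void *data)
 *	{
 *		struct foo_bus_async *async = data;
 *
 *		regmap_async_complete_cb(&async->core, async->status);
 *	}
 */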
2959 
2960 static int regmap_async_is_done(struct regmap *map)
2961 {
2962         unsigned long flags;
2963         int ret;
2964 
2965         spin_lock_irqsave(&map->async_lock, flags);
2966         ret = list_empty(&map->async_list);
2967         spin_unlock_irqrestore(&map->async_lock, flags);
2968 
2969         return ret;
2970 }
2971 
2972 /**
2973  * regmap_async_complete() - Ensure all asynchronous I/O has completed.
2974  *
2975  * @map: Map to operate on.
2976  *
2977  * Blocks until any pending asynchronous I/O has completed.  Returns
2978  * an error code for any failed I/O operations.
2979  */
2980 int regmap_async_complete(struct regmap *map)
2981 {
2982         unsigned long flags;
2983         int ret;
2984 
2985         /* Nothing to do if the bus has no async support */
2986         if (!map->bus || !map->bus->async_write)
2987                 return 0;
2988 
2989         trace_regmap_async_complete_start(map);
2990 
2991         wait_event(map->async_waitq, regmap_async_is_done(map));
2992 
2993         spin_lock_irqsave(&map->async_lock, flags);
2994         ret = map->async_ret;
2995         map->async_ret = 0;
2996         spin_unlock_irqrestore(&map->async_lock, flags);
2997 
2998         trace_regmap_async_complete_done(map);
2999 
3000         return ret;
3001 }
3002 EXPORT_SYMBOL_GPL(regmap_async_complete);
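
/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * issuing an asynchronous write and then waiting for all pending async
 * I/O to drain.  The register name is invented; regmap_write_async()
 * and regmap_async_complete() are the real API calls.
 *
 *	int ret;
 *
 *	ret = regmap_write_async(foo->regmap, FOO_FIFO_REG, value);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_async_complete(foo->regmap);
 *	if (ret)
 *		dev_err(foo->dev, "async write failed: %d\n", ret);
 *	return ret;
 */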
3003 
3004 /**
3005  * regmap_register_patch - Register and apply register updates to be applied
3006  *                         on device initialistion
3007  *
3008  * @map: Register map to apply updates to.
3009  * @regs: Values to update.
3010  * @num_regs: Number of entries in regs.
3011  *
3012  * Register a set of register updates to be applied to the device
3013  * whenever the device registers are synchronised with the cache and
3014  * apply them immediately.  Typically this is used to apply
3015  * corrections to the device defaults on startup, such as the
3016  * updates some vendors provide to undocumented registers.
3017  *
3018  * The caller must ensure that this function cannot be called
3019  * concurrently with either itself or regcache_sync().
3020  */
3021 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
3022                           int num_regs)
3023 {
3024         struct reg_sequence *p;
3025         int ret;
3026         bool bypass;
3027 
3028         if (WARN_ONCE(num_regs <= 0, "invalid number of registers (%d)\n",
3029             num_regs))
3030                 return 0;
3031 
3032         p = krealloc(map->patch,
3033                      sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3034                      GFP_KERNEL);
3035         if (p) {
3036                 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3037                 map->patch = p;
3038                 map->patch_regs += num_regs;
3039         } else {
3040                 return -ENOMEM;
3041         }
3042 
3043         map->lock(map->lock_arg);
3044 
3045         bypass = map->cache_bypass;
3046 
3047         map->cache_bypass = true;
3048         map->async = true;
3049 
3050         ret = _regmap_multi_reg_write(map, regs, num_regs);
3051 
3052         map->async = false;
3053         map->cache_bypass = bypass;
3054 
3055         map->unlock(map->lock_arg);
3056 
3057         regmap_async_complete(map);
3058 
3059         return ret;
3060 }
3061 EXPORT_SYMBOL_GPL(regmap_register_patch);
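
/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * registering a vendor-supplied errata patch at probe time.  The
 * register addresses and values are invented; the struct reg_sequence
 * array and the regmap_register_patch() call follow the real API, and
 * the optional third reg_sequence field is a delay in microseconds
 * applied after that write.
 *
 *	static const struct reg_sequence foo_errata_patch[] = {
 *		{ 0x101, 0x0001 },
 *		{ 0x102, 0x8000, 10 },
 *	};
 *
 *	ret = regmap_register_patch(foo->regmap, foo_errata_patch,
 *				    ARRAY_SIZE(foo_errata_patch));
 *	if (ret)
 *		return ret;
 */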
3062 
3063 /**
3064  * regmap_get_val_bytes() - Report the size of a register value
3065  *
3066  * @map: Register map to operate on.
3067  *
3068  * Report the size of a register value, mainly intended for use by
3069  * generic infrastructure built on top of regmap.
3070  */
3071 int regmap_get_val_bytes(struct regmap *map)
3072 {
3073         if (map->format.format_write)
3074                 return -EINVAL;
3075 
3076         return map->format.val_bytes;
3077 }
3078 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3079 
3080 /**
3081  * regmap_get_max_register() - Report the max register value
3082  *
3083  * @map: Register map to operate on.
3084  *
3085  * Report the max register value, mainly intended for use by
3086  * generic infrastructure built on top of regmap.
3087  */
3088 int regmap_get_max_register(struct regmap *map)
3089 {
3090         return map->max_register ? map->max_register : -EINVAL;
3091 }
3092 EXPORT_SYMBOL_GPL(regmap_get_max_register);
3093 
3094 /**
3095  * regmap_get_reg_stride() - Report the register address stride
3096  *
3097  * @map: Register map to operate on.
3098  *
3099  * Report the register address stride, mainly intended for use by
3100  * generic infrastructure built on top of regmap.
3101  */
3102 int regmap_get_reg_stride(struct regmap *map)
3103 {
3104         return map->reg_stride;
3105 }
3106 EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
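
/*
 * Illustrative sketch (not part of this file): generic infrastructure
 * built on top of regmap (for instance a register dumper) can size its
 * iteration from these accessors.  The dump_one_register() helper is
 * hypothetical.
 *
 *	int max = regmap_get_max_register(map);
 *	int stride = regmap_get_reg_stride(map);
 *	int val_bytes = regmap_get_val_bytes(map);
 *	unsigned int reg;
 *
 *	if (max < 0 || val_bytes < 0)
 *		return;		// map does not expose this information
 *
 *	for (reg = 0; reg <= max; reg += stride)
 *		dump_one_register(map, reg, val_bytes);
 */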
3107 
3108 int regmap_parse_val(struct regmap *map, const void *buf,
3109                         unsigned int *val)
3110 {
3111         if (!map->format.parse_val)
3112                 return -EINVAL;
3113 
3114         *val = map->format.parse_val(buf);
3115 
3116         return 0;
3117 }
3118 EXPORT_SYMBOL_GPL(regmap_parse_val);
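
/*
 * Illustrative sketch (not part of this file): converting one value out
 * of a raw buffer (for instance one previously filled by
 * regmap_raw_read()) into CPU-native form with regmap_parse_val().  The
 * raw_buf pointer and index i are hypothetical.
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = regmap_parse_val(map, raw_buf + i * regmap_get_val_bytes(map),
 *			       &val);
 *	if (ret)
 *		return ret;
 */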
3119 
3120 static int __init regmap_initcall(void)
3121 {
3122         regmap_debugfs_initcall();
3123 
3124         return 0;
3125 }
3126 postcore_initcall(regmap_initcall);
