root/drivers/base/regmap/regmap-debugfs.c

DEFINITIONS

This source file includes the following definitions.
  1. regmap_calc_reg_len
  2. regmap_name_read_file
  3. regmap_debugfs_free_dump_cache
  4. regmap_printable
  5. regmap_debugfs_get_dump_start
  6. regmap_calc_tot_len
  7. regmap_next_readable_reg
  8. regmap_read_debugfs
  9. regmap_map_read_file
  10. regmap_map_write_file
  11. regmap_range_read_file
  12. regmap_reg_ranges_read_file
  13. regmap_access_show
  14. regmap_cache_only_write_file
  15. regmap_cache_bypass_write_file
  16. regmap_debugfs_init
  17. regmap_debugfs_exit
  18. regmap_debugfs_initcall

   1 // SPDX-License-Identifier: GPL-2.0
   2 //
   3 // Register map access API - debugfs
   4 //
   5 // Copyright 2011 Wolfson Microelectronics plc
   6 //
   7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
   8 
   9 #include <linux/slab.h>
  10 #include <linux/mutex.h>
  11 #include <linux/debugfs.h>
  12 #include <linux/uaccess.h>
  13 #include <linux/device.h>
  14 #include <linux/list.h>
  15 
  16 #include "internal.h"
  17 
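     /*
      * Record for a regmap whose debugfs init was requested before the
      * debugfs root existed; such maps are queued on
      * regmap_debugfs_early_list and registered from regmap_debugfs_initcall().
      */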
  18 struct regmap_debugfs_node {
  19         struct regmap *map;
  20         const char *name;
  21         struct list_head link;
  22 };
  23 
  24 static unsigned int dummy_index;
  25 static struct dentry *regmap_debugfs_root;
  26 static LIST_HEAD(regmap_debugfs_early_list);
  27 static DEFINE_MUTEX(regmap_debugfs_early_lock);
  28 
  29 /* Calculate how many hex digits are needed to print a register address */
  30 static size_t regmap_calc_reg_len(int max_val)
  31 {
  32         return snprintf(NULL, 0, "%x", max_val);
  33 }
  34 
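     /*
      * Read handler for the "name" file: report the name of the driver
      * bound to the underlying device, or "nodev" if there is none.
      */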
  35 static ssize_t regmap_name_read_file(struct file *file,
  36                                      char __user *user_buf, size_t count,
  37                                      loff_t *ppos)
  38 {
  39         struct regmap *map = file->private_data;
  40         const char *name = "nodev";
  41         int ret;
  42         char *buf;
  43 
  44         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  45         if (!buf)
  46                 return -ENOMEM;
  47 
  48         if (map->dev && map->dev->driver)
  49                 name = map->dev->driver->name;
  50 
  51         ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
  52         if (ret < 0) {
  53                 kfree(buf);
  54                 return ret;
  55         }
  56 
  57         ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
  58         kfree(buf);
  59         return ret;
  60 }
  61 
  62 static const struct file_operations regmap_name_fops = {
  63         .open = simple_open,
  64         .read = regmap_name_read_file,
  65         .llseek = default_llseek,
  66 };
  67 
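     /*
      * Free every entry in the register dump offset cache.  Callers hold
      * map->cache_lock.
      */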
  68 static void regmap_debugfs_free_dump_cache(struct regmap *map)
  69 {
  70         struct regmap_debugfs_off_cache *c;
  71 
  72         while (!list_empty(&map->debugfs_off_cache)) {
  73                 c = list_first_entry(&map->debugfs_off_cache,
  74                                      struct regmap_debugfs_off_cache,
  75                                      list);
  76                 list_del(&c->list);
  77                 kfree(c);
  78         }
  79 }
  80 
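     /*
      * A register appears in the debugfs dump if it is not precious and is
      * either readable from the hardware or present in the register cache.
      */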
  81 static bool regmap_printable(struct regmap *map, unsigned int reg)
  82 {
  83         if (regmap_precious(map, reg))
  84                 return false;
  85 
  86         if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
  87                 return false;
  88 
  89         return true;
  90 }
  91 
  92 /*
  93  * Work out where the start offset maps into register numbers, bearing
  94  * in mind that we suppress hidden registers.
  95  */
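     /*
      * Illustrative example: with reg_stride == 1 and a dump line width
      * (debugfs_tot_len) of 7, e.g. "12: 34\n", a file offset of 14 inside
      * a cached block maps to the third printable register of that block:
      * reg = base_reg + ((14 - min) / 7) * reg_stride.
      */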
  96 static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
  97                                                   unsigned int base,
  98                                                   loff_t from,
  99                                                   loff_t *pos)
 100 {
 101         struct regmap_debugfs_off_cache *c = NULL;
 102         loff_t p = 0;
 103         unsigned int i, ret;
 104         unsigned int fpos_offset;
 105         unsigned int reg_offset;
 106 
 107         /* Suppress the cache if we're using a subrange */
 108         if (base)
 109                 return base;
 110 
 111         /*
 112          * If we don't have a cache build one so we don't have to do a
 113          * linear scan each time.
 114          */
 115         mutex_lock(&map->cache_lock);
 116         i = base;
 117         if (list_empty(&map->debugfs_off_cache)) {
 118                 for (; i <= map->max_register; i += map->reg_stride) {
 119                         /* Skip unprinted registers, closing off cache entry */
 120                         if (!regmap_printable(map, i)) {
 121                                 if (c) {
 122                                         c->max = p - 1;
 123                                         c->max_reg = i - map->reg_stride;
 124                                         list_add_tail(&c->list,
 125                                                       &map->debugfs_off_cache);
 126                                         c = NULL;
 127                                 }
 128 
 129                                 continue;
 130                         }
 131 
 132                         /* No cache entry?  Start a new one */
 133                         if (!c) {
 134                                 c = kzalloc(sizeof(*c), GFP_KERNEL);
 135                                 if (!c) {
 136                                         regmap_debugfs_free_dump_cache(map);
 137                                         mutex_unlock(&map->cache_lock);
 138                                         return base;
 139                                 }
 140                                 c->min = p;
 141                                 c->base_reg = i;
 142                         }
 143 
 144                         p += map->debugfs_tot_len;
 145                 }
 146         }
 147 
 148         /* Close the last entry off if we didn't scan beyond it */
 149         if (c) {
 150                 c->max = p - 1;
 151                 c->max_reg = i - map->reg_stride;
 152                 list_add_tail(&c->list,
 153                               &map->debugfs_off_cache);
 154         }
 155 
 156         /*
 157          * This should never happen; we return above if we fail to
 158          * allocate and we should never be in this code if there are
 159          * no registers at all.
 160          */
 161         WARN_ON(list_empty(&map->debugfs_off_cache));
 162         ret = base;
 163 
 164         /* Find the relevant block:offset */
 165         list_for_each_entry(c, &map->debugfs_off_cache, list) {
 166                 if (from >= c->min && from <= c->max) {
 167                         fpos_offset = from - c->min;
 168                         reg_offset = fpos_offset / map->debugfs_tot_len;
 169                         *pos = c->min + (reg_offset * map->debugfs_tot_len);
 170                         mutex_unlock(&map->cache_lock);
 171                         return c->base_reg + (reg_offset * map->reg_stride);
 172                 }
 173 
 174                 *pos = c->max;
 175                 ret = c->max_reg;
 176         }
 177         mutex_unlock(&map->cache_lock);
 178 
 179         return ret;
 180 }
 181 
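     /*
      * Lazily work out the fixed width of one "<reg>: <value>\n" dump line
      * the first time a dump is requested; the buf and count arguments are
      * currently unused.
      */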
 182 static inline void regmap_calc_tot_len(struct regmap *map,
 183                                        void *buf, size_t count)
 184 {
 185         /* Calculate the fixed length of one "<reg>: <value>\n" dump line */
 186         if (!map->debugfs_tot_len) {
 187                 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
 188                 map->debugfs_val_len = 2 * map->format.val_bytes;
 189                 map->debugfs_tot_len = map->debugfs_reg_len +
 190                         map->debugfs_val_len + 3;      /* : \n */
 191         }
 192 }
 193 
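     /*
      * Return the next register after 'reg' that will appear in the dump,
      * or -EINVAL if there is none.  The offset cache is used to skip over
      * whole blocks of unprintable registers in one step.
      */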
 194 static int regmap_next_readable_reg(struct regmap *map, int reg)
 195 {
 196         struct regmap_debugfs_off_cache *c;
 197         int ret = -EINVAL;
 198 
 199         if (regmap_printable(map, reg + map->reg_stride)) {
 200                 ret = reg + map->reg_stride;
 201         } else {
 202                 mutex_lock(&map->cache_lock);
 203                 list_for_each_entry(c, &map->debugfs_off_cache, list) {
 204                         if (reg > c->max_reg)
 205                                 continue;
 206                         if (reg < c->base_reg) {
 207                                 ret = c->base_reg;
 208                                 break;
 209                         }
 210                 }
 211                 mutex_unlock(&map->cache_lock);
 212         }
 213         return ret;
 214 }
 215 
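     /*
      * Core dump routine shared by the "registers" file and the named range
      * files: format one "<reg>: <value>" line for each printable register
      * in [from, to] that falls inside the window requested by userspace.
      * Register addresses are printed relative to 'from'; values that cannot
      * be read are shown as all 'X'.
      */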
 216 static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
 217                                    unsigned int to, char __user *user_buf,
 218                                    size_t count, loff_t *ppos)
 219 {
 220         size_t buf_pos = 0;
 221         loff_t p = *ppos;
 222         ssize_t ret;
 223         int i;
 224         char *buf;
 225         unsigned int val, start_reg;
 226 
 227         if (*ppos < 0 || !count)
 228                 return -EINVAL;
 229 
 230         buf = kmalloc(count, GFP_KERNEL);
 231         if (!buf)
 232                 return -ENOMEM;
 233 
 234         regmap_calc_tot_len(map, buf, count);
 235 
 236         /* Work out which register we're starting at */
 237         start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
 238 
 239         for (i = start_reg; i >= 0 && i <= to;
 240              i = regmap_next_readable_reg(map, i)) {
 241 
 242                 /* If we're in the region the user is trying to read */
 243                 if (p >= *ppos) {
 244                         /* ...but not beyond it */
 245                         if (buf_pos + map->debugfs_tot_len > count)
 246                                 break;
 247 
 248                         /* Format the register */
 249                         snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
 250                                  map->debugfs_reg_len, i - from);
 251                         buf_pos += map->debugfs_reg_len + 2;
 252 
 253                         /* Format the value, write all X if we can't read */
 254                         ret = regmap_read(map, i, &val);
 255                         if (ret == 0)
 256                                 snprintf(buf + buf_pos, count - buf_pos,
 257                                          "%.*x", map->debugfs_val_len, val);
 258                         else
 259                                 memset(buf + buf_pos, 'X',
 260                                        map->debugfs_val_len);
 261                         buf_pos += 2 * map->format.val_bytes;
 262 
 263                         buf[buf_pos++] = '\n';
 264                 }
 265                 p += map->debugfs_tot_len;
 266         }
 267 
 268         ret = buf_pos;
 269 
 270         if (copy_to_user(user_buf, buf, buf_pos)) {
 271                 ret = -EFAULT;
 272                 goto out;
 273         }
 274 
 275         *ppos += buf_pos;
 276 
 277 out:
 278         kfree(buf);
 279         return ret;
 280 }
 281 
 282 static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
 283                                     size_t count, loff_t *ppos)
 284 {
 285         struct regmap *map = file->private_data;
 286 
 287         return regmap_read_debugfs(map, 0, map->max_register, user_buf,
 288                                    count, ppos);
 289 }
 290 
 291 #undef REGMAP_ALLOW_WRITE_DEBUGFS
 292 #ifdef REGMAP_ALLOW_WRITE_DEBUGFS
 293 /*
 294  * Writing to registers from debugfs can be dangerous, especially with
 295  * clients such as PMICs, so no real compile-time configuration option is
 296  * provided for this feature; anyone who wants to use it needs to modify
 297  * the source code directly.
 298  */
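     /*
      * The expected input is "<reg> <value>" with both fields in hex, so
      * for example 'echo "7a 1f" > registers' would write the value 0x1f
      * to register 0x7a.
      */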
 299 static ssize_t regmap_map_write_file(struct file *file,
 300                                      const char __user *user_buf,
 301                                      size_t count, loff_t *ppos)
 302 {
 303         char buf[32];
 304         size_t buf_size;
 305         char *start = buf;
 306         unsigned long reg, value;
 307         struct regmap *map = file->private_data;
 308         int ret;
 309 
 310         buf_size = min(count, (sizeof(buf)-1));
 311         if (copy_from_user(buf, user_buf, buf_size))
 312                 return -EFAULT;
 313         buf[buf_size] = 0;
 314 
 315         while (*start == ' ')
 316                 start++;
 317         reg = simple_strtoul(start, &start, 16);
 318         while (*start == ' ')
 319                 start++;
 320         if (kstrtoul(start, 16, &value))
 321                 return -EINVAL;
 322 
 323         /* Userspace has been fiddling around behind the kernel's back */
 324         add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 325 
 326         ret = regmap_write(map, reg, value);
 327         if (ret < 0)
 328                 return ret;
 329         return buf_size;
 330 }
 331 #else
 332 #define regmap_map_write_file NULL
 333 #endif
 334 
 335 static const struct file_operations regmap_map_fops = {
 336         .open = simple_open,
 337         .read = regmap_map_read_file,
 338         .write = regmap_map_write_file,
 339         .llseek = default_llseek,
 340 };
 341 
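     /* Read handler for a named range file: dump only [range_min, range_max] */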
 342 static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
 343                                       size_t count, loff_t *ppos)
 344 {
 345         struct regmap_range_node *range = file->private_data;
 346         struct regmap *map = range->map;
 347 
 348         return regmap_read_debugfs(map, range->range_min, range->range_max,
 349                                    user_buf, count, ppos);
 350 }
 351 
 352 static const struct file_operations regmap_range_fops = {
 353         .open = simple_open,
 354         .read = regmap_range_read_file,
 355         .llseek = default_llseek,
 356 };
 357 
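     /*
      * Read handler for the "range" file: emit one "<first>-<last>" line
      * per contiguous block of printable registers, taken from the register
      * dump offset cache.
      */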
 358 static ssize_t regmap_reg_ranges_read_file(struct file *file,
 359                                            char __user *user_buf, size_t count,
 360                                            loff_t *ppos)
 361 {
 362         struct regmap *map = file->private_data;
 363         struct regmap_debugfs_off_cache *c;
 364         loff_t p = 0;
 365         size_t buf_pos = 0;
 366         char *buf;
 367         char *entry;
 368         int ret;
 369         unsigned int entry_len;
 370 
 371         if (*ppos < 0 || !count)
 372                 return -EINVAL;
 373 
 374         buf = kmalloc(count, GFP_KERNEL);
 375         if (!buf)
 376                 return -ENOMEM;
 377 
 378         entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
 379         if (!entry) {
 380                 kfree(buf);
 381                 return -ENOMEM;
 382         }
 383 
 384         /* While we are at it, build the register dump cache
 385          * now so the read() operation on the `registers' file
 386          * can benefit from using the cache.  We do not care
 387          * about the file position information that is contained
 388          * in the cache, just about the actual register blocks */
 389         regmap_calc_tot_len(map, buf, count);
 390         regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
 391 
 392         /* Reset file pointer as the fixed-format of the `registers'
 393          * file is not compatible with the `range' file */
 394         p = 0;
 395         mutex_lock(&map->cache_lock);
 396         list_for_each_entry(c, &map->debugfs_off_cache, list) {
 397                 entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
 398                                      c->base_reg, c->max_reg);
 399                 if (p >= *ppos) {
 400                         if (buf_pos + entry_len > count)
 401                                 break;
 402                         memcpy(buf + buf_pos, entry, entry_len);
 403                         buf_pos += entry_len;
 404                 }
 405                 p += entry_len;
 406         }
 407         mutex_unlock(&map->cache_lock);
 408 
 409         kfree(entry);
 410         ret = buf_pos;
 411 
 412         if (copy_to_user(user_buf, buf, buf_pos)) {
 413                 ret = -EFAULT;
 414                 goto out_buf;
 415         }
 416 
 417         *ppos += buf_pos;
 418 out_buf:
 419         kfree(buf);
 420         return ret;
 421 }
 422 
 423 static const struct file_operations regmap_reg_ranges_fops = {
 424         .open = simple_open,
 425         .read = regmap_reg_ranges_read_file,
 426         .llseek = default_llseek,
 427 };
 428 
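     /*
      * "access" file: one line per register that is readable or writeable,
      * with four y/n flags in the order readable, writeable, volatile,
      * precious.
      */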
 429 static int regmap_access_show(struct seq_file *s, void *ignored)
 430 {
 431         struct regmap *map = s->private;
 432         int i, reg_len;
 433 
 434         reg_len = regmap_calc_reg_len(map->max_register);
 435 
 436         for (i = 0; i <= map->max_register; i += map->reg_stride) {
 437                 /* Ignore registers which are neither readable nor writable */
 438                 if (!regmap_readable(map, i) && !regmap_writeable(map, i))
 439                         continue;
 440 
 441                 /* Format the register */
 442                 seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
 443                            regmap_readable(map, i) ? 'y' : 'n',
 444                            regmap_writeable(map, i) ? 'y' : 'n',
 445                            regmap_volatile(map, i) ? 'y' : 'n',
 446                            regmap_precious(map, i) ? 'y' : 'n');
 447         }
 448 
 449         return 0;
 450 }
 451 
 452 DEFINE_SHOW_ATTRIBUTE(regmap_access);
 453 
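     /*
      * Write handler for "cache_only".  Forcing it on from userspace taints
      * the kernel; turning it back off triggers a cache sync so that writes
      * made while cache-only was forced reach the hardware.
      */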
 454 static ssize_t regmap_cache_only_write_file(struct file *file,
 455                                             const char __user *user_buf,
 456                                             size_t count, loff_t *ppos)
 457 {
 458         struct regmap *map = container_of(file->private_data,
 459                                           struct regmap, cache_only);
 460         ssize_t result;
 461         bool was_enabled, require_sync = false;
 462         int err;
 463 
 464         map->lock(map->lock_arg);
 465 
 466         was_enabled = map->cache_only;
 467 
 468         result = debugfs_write_file_bool(file, user_buf, count, ppos);
 469         if (result < 0) {
 470                 map->unlock(map->lock_arg);
 471                 return result;
 472         }
 473 
 474         if (map->cache_only && !was_enabled) {
 475                 dev_warn(map->dev, "debugfs cache_only=Y forced\n");
 476                 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 477         } else if (!map->cache_only && was_enabled) {
 478                 dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
 479                 require_sync = true;
 480         }
 481 
 482         map->unlock(map->lock_arg);
 483 
 484         if (require_sync) {
 485                 err = regcache_sync(map);
 486                 if (err)
 487                         dev_err(map->dev, "Failed to sync cache %d\n", err);
 488         }
 489 
 490         return result;
 491 }
 492 
 493 static const struct file_operations regmap_cache_only_fops = {
 494         .open = simple_open,
 495         .read = debugfs_read_file_bool,
 496         .write = regmap_cache_only_write_file,
 497 };
 498 
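     /*
      * Write handler for "cache_bypass": both transitions are logged, and
      * forcing bypass on taints the kernel.
      */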
 499 static ssize_t regmap_cache_bypass_write_file(struct file *file,
 500                                               const char __user *user_buf,
 501                                               size_t count, loff_t *ppos)
 502 {
 503         struct regmap *map = container_of(file->private_data,
 504                                           struct regmap, cache_bypass);
 505         ssize_t result;
 506         bool was_enabled;
 507 
 508         map->lock(map->lock_arg);
 509 
 510         was_enabled = map->cache_bypass;
 511 
 512         result = debugfs_write_file_bool(file, user_buf, count, ppos);
 513         if (result < 0)
 514                 goto out;
 515 
 516         if (map->cache_bypass && !was_enabled) {
 517                 dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
 518                 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
 519         } else if (!map->cache_bypass && was_enabled) {
 520                 dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
 521         }
 522 
 523 out:
 524         map->unlock(map->lock_arg);
 525 
 526         return result;
 527 }
 528 
 529 static const struct file_operations regmap_cache_bypass_fops = {
 530         .open = simple_open,
 531         .read = debugfs_read_file_bool,
 532         .write = regmap_cache_bypass_write_file,
 533 };
 534 
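     /*
      * Create the per-map debugfs directory and its files.  If the debugfs
      * root does not exist yet the map is queued on
      * regmap_debugfs_early_list and picked up later by
      * regmap_debugfs_initcall().
      */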
 535 void regmap_debugfs_init(struct regmap *map, const char *name)
 536 {
 537         struct rb_node *next;
 538         struct regmap_range_node *range_node;
 539         const char *devname = "dummy";
 540 
 541         /*
 542          * Userspace can initiate reads from the hardware over debugfs.
 543          * Normally internal regmap structures and buffers are protected with
 544          * a mutex or a spinlock, but if the regmap owner decided to disable
 545          * all locking mechanisms, this is no longer the case. For safety:
 546          * don't create the debugfs entries if locking is disabled.
 547          */
 548         if (map->debugfs_disable) {
 549                 dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
 550                 return;
 551         }
 552 
 553         /* If we don't have the debugfs root yet, postpone init */
 554         if (!regmap_debugfs_root) {
 555                 struct regmap_debugfs_node *node;
 556                 node = kzalloc(sizeof(*node), GFP_KERNEL);
 557                 if (!node)
 558                         return;
 559                 node->map = map;
 560                 node->name = name;
 561                 mutex_lock(&regmap_debugfs_early_lock);
 562                 list_add(&node->link, &regmap_debugfs_early_list);
 563                 mutex_unlock(&regmap_debugfs_early_lock);
 564                 return;
 565         }
 566 
 567         INIT_LIST_HEAD(&map->debugfs_off_cache);
 568         mutex_init(&map->cache_lock);
 569 
 570         if (map->dev)
 571                 devname = dev_name(map->dev);
 572 
 573         if (name) {
 574                 map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
 575                                               devname, name);
 576                 name = map->debugfs_name;
 577         } else {
 578                 name = devname;
 579         }
 580 
 581         if (!strcmp(name, "dummy")) {
 582                 kfree(map->debugfs_name);
 583 
 584                 map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
 585                                                 dummy_index);
 586                 name = map->debugfs_name;
 587                 dummy_index++;
 588         }
 589 
 590         map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
 591 
 592         debugfs_create_file("name", 0400, map->debugfs,
 593                             map, &regmap_name_fops);
 594 
 595         debugfs_create_file("range", 0400, map->debugfs,
 596                             map, &regmap_reg_ranges_fops);
 597 
 598         if (map->max_register || regmap_readable(map, 0)) {
 599                 umode_t registers_mode;
 600 
 601 #if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
 602                 registers_mode = 0600;
 603 #else
 604                 registers_mode = 0400;
 605 #endif
 606 
 607                 debugfs_create_file("registers", registers_mode, map->debugfs,
 608                                     map, &regmap_map_fops);
 609                 debugfs_create_file("access", 0400, map->debugfs,
 610                                     map, &regmap_access_fops);
 611         }
 612 
 613         if (map->cache_type) {
 614                 debugfs_create_file("cache_only", 0600, map->debugfs,
 615                                     &map->cache_only, &regmap_cache_only_fops);
 616                 debugfs_create_bool("cache_dirty", 0400, map->debugfs,
 617                                     &map->cache_dirty);
 618                 debugfs_create_file("cache_bypass", 0600, map->debugfs,
 619                                     &map->cache_bypass,
 620                                     &regmap_cache_bypass_fops);
 621         }
 622 
 623         next = rb_first(&map->range_tree);
 624         while (next) {
 625                 range_node = rb_entry(next, struct regmap_range_node, node);
 626 
 627                 if (range_node->name)
 628                         debugfs_create_file(range_node->name, 0400,
 629                                             map->debugfs, range_node,
 630                                             &regmap_range_fops);
 631 
 632                 next = rb_next(&range_node->node);
 633         }
 634 
 635         if (map->cache_ops && map->cache_ops->debugfs_init)
 636                 map->cache_ops->debugfs_init(map);
 637 }
 638 
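     /*
      * Remove a map's debugfs entries and free its dump cache, or drop it
      * from the early init list if the entries were never created.
      */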
 639 void regmap_debugfs_exit(struct regmap *map)
 640 {
 641         if (map->debugfs) {
 642                 debugfs_remove_recursive(map->debugfs);
 643                 mutex_lock(&map->cache_lock);
 644                 regmap_debugfs_free_dump_cache(map);
 645                 mutex_unlock(&map->cache_lock);
 646                 kfree(map->debugfs_name);
 647         } else {
 648                 struct regmap_debugfs_node *node, *tmp;
 649 
 650                 mutex_lock(&regmap_debugfs_early_lock);
 651                 list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
 652                                          link) {
 653                         if (node->map == map) {
 654                                 list_del(&node->link);
 655                                 kfree(node);
 656                         }
 657                 }
 658                 mutex_unlock(&regmap_debugfs_early_lock);
 659         }
 660 }
 661 
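     /*
      * Create the top-level "regmap" debugfs directory and initialise any
      * maps that were registered before debugfs was available.
      */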
 662 void regmap_debugfs_initcall(void)
 663 {
 664         struct regmap_debugfs_node *node, *tmp;
 665 
 666         regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
 667 
 668         mutex_lock(&regmap_debugfs_early_lock);
 669         list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
 670                 regmap_debugfs_init(node->map, node->name);
 671                 list_del(&node->link);
 672                 kfree(node);
 673         }
 674         mutex_unlock(&regmap_debugfs_early_lock);
 675 }
