root/drivers/pci/controller/dwc/pcie-designware.c

DEFINITIONS

This source file includes the following definitions:
  1. __dw_pcie_find_next_cap
  2. dw_pcie_find_capability
  3. dw_pcie_find_next_ext_capability
  4. dw_pcie_find_ext_capability
  5. dw_pcie_read
  6. dw_pcie_write
  7. dw_pcie_read_dbi
  8. dw_pcie_write_dbi
  9. dw_pcie_read_dbi2
  10. dw_pcie_write_dbi2
  11. dw_pcie_read_atu
  12. dw_pcie_write_atu
  13. dw_pcie_readl_ob_unroll
  14. dw_pcie_writel_ob_unroll
  15. dw_pcie_prog_outbound_atu_unroll
  16. dw_pcie_prog_outbound_atu
  17. dw_pcie_readl_ib_unroll
  18. dw_pcie_writel_ib_unroll
  19. dw_pcie_prog_inbound_atu_unroll
  20. dw_pcie_prog_inbound_atu
  21. dw_pcie_disable_atu
  22. dw_pcie_wait_for_link
  23. dw_pcie_link_up
  24. dw_pcie_iatu_unroll_enabled
  25. dw_pcie_setup

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Synopsys DesignWare PCIe host controller driver
   4  *
   5  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
   6  *              http://www.samsung.com
   7  *
   8  * Author: Jingoo Han <jg1.han@samsung.com>
   9  */
  10 
  11 #include <linux/delay.h>
  12 #include <linux/of.h>
  13 #include <linux/types.h>
  14 
  15 #include "pcie-designware.h"
  16 
  17 /*
  18  * These interfaces resemble the pci_find_*capability() interfaces, but these
  19  * are for configuring host controllers, which are bridges *to* PCI devices but
  20  * are not PCI devices themselves.
  21  */
  22 static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
  23                                   u8 cap)
  24 {
  25         u8 cap_id, next_cap_ptr;
  26         u16 reg;
  27 
  28         if (!cap_ptr)
  29                 return 0;
  30 
  31         reg = dw_pcie_readw_dbi(pci, cap_ptr);
  32         cap_id = (reg & 0x00ff);
  33 
  34         if (cap_id > PCI_CAP_ID_MAX)
  35                 return 0;
  36 
  37         if (cap_id == cap)
  38                 return cap_ptr;
  39 
  40         next_cap_ptr = (reg & 0xff00) >> 8;
  41         return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
  42 }
  43 
  44 u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
  45 {
  46         u8 next_cap_ptr;
  47         u16 reg;
  48 
  49         reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
  50         next_cap_ptr = (reg & 0x00ff);
  51 
  52         return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
  53 }
  54 EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
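/*
 * Typical use from a glue driver, assuming a valid struct dw_pcie *pci
 * (illustrative sketch; which capability is queried and what is done with
 * it vary by platform):
 *
 *	u8 pcie_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 *	u16 lnksta;
 *
 *	if (pcie_cap)
 *		lnksta = dw_pcie_readw_dbi(pci, pcie_cap + PCI_EXP_LNKSTA);
 */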
  55 
  56 static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
  57                                             u8 cap)
  58 {
  59         u32 header;
  60         int ttl;
  61         int pos = PCI_CFG_SPACE_SIZE;
  62 
  63         /* minimum 8 bytes per capability */
  64         ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
  65 
  66         if (start)
  67                 pos = start;
  68 
  69         header = dw_pcie_readl_dbi(pci, pos);
  70         /*
  71          * If we have no capabilities, this is indicated by cap ID,
  72          * cap version and next pointer all being 0.
  73          */
  74         if (header == 0)
  75                 return 0;
  76 
  77         while (ttl-- > 0) {
  78                 if (PCI_EXT_CAP_ID(header) == cap && pos != start)
  79                         return pos;
  80 
  81                 pos = PCI_EXT_CAP_NEXT(header);
  82                 if (pos < PCI_CFG_SPACE_SIZE)
  83                         break;
  84 
  85                 header = dw_pcie_readl_dbi(pci, pos);
  86         }
  87 
  88         return 0;
  89 }
  90 
  91 u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
  92 {
  93         return dw_pcie_find_next_ext_capability(pci, 0, cap);
  94 }
  95 EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
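/*
 * Example (sketch only): locating an extended capability in the controller's
 * own config space, e.g. the L1 PM Substates block:
 *
 *	u16 l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
 *
 *	if (!l1ss)
 *		dev_dbg(pci->dev, "L1SS capability not present\n");
 */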
  96 
  97 int dw_pcie_read(void __iomem *addr, int size, u32 *val)
  98 {
  99         if (!IS_ALIGNED((uintptr_t)addr, size)) {
 100                 *val = 0;
 101                 return PCIBIOS_BAD_REGISTER_NUMBER;
 102         }
 103 
 104         if (size == 4) {
 105                 *val = readl(addr);
 106         } else if (size == 2) {
 107                 *val = readw(addr);
 108         } else if (size == 1) {
 109                 *val = readb(addr);
 110         } else {
 111                 *val = 0;
 112                 return PCIBIOS_BAD_REGISTER_NUMBER;
 113         }
 114 
 115         return PCIBIOS_SUCCESSFUL;
 116 }
 117 EXPORT_SYMBOL_GPL(dw_pcie_read);
 118 
 119 int dw_pcie_write(void __iomem *addr, int size, u32 val)
 120 {
 121         if (!IS_ALIGNED((uintptr_t)addr, size))
 122                 return PCIBIOS_BAD_REGISTER_NUMBER;
 123 
 124         if (size == 4)
 125                 writel(val, addr);
 126         else if (size == 2)
 127                 writew(val, addr);
 128         else if (size == 1)
 129                 writeb(val, addr);
 130         else
 131                 return PCIBIOS_BAD_REGISTER_NUMBER;
 132 
 133         return PCIBIOS_SUCCESSFUL;
 134 }
 135 EXPORT_SYMBOL_GPL(dw_pcie_write);
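/*
 * Both helpers require the access to be naturally aligned to its size and do
 * not touch the hardware otherwise. For example (sketch, assuming "base" is
 * an ioremap()ed, 4-byte-aligned register window):
 *
 *	u32 val;
 *	int ret;
 *
 *	ret = dw_pcie_read(base + 0x2, 4, &val);  // misaligned: PCIBIOS_BAD_REGISTER_NUMBER
 *	ret = dw_pcie_read(base + 0x4, 4, &val);  // aligned: PCIBIOS_SUCCESSFUL
 */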
 136 
 137 u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
 138 {
 139         int ret;
 140         u32 val;
 141 
 142         if (pci->ops->read_dbi)
 143                 return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
 144 
 145         ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
 146         if (ret)
 147                 dev_err(pci->dev, "Read DBI address failed\n");
 148 
 149         return val;
 150 }
 151 EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
 152 
 153 void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
 154 {
 155         int ret;
 156 
 157         if (pci->ops->write_dbi) {
 158                 pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
 159                 return;
 160         }
 161 
 162         ret = dw_pcie_write(pci->dbi_base + reg, size, val);
 163         if (ret)
 164                 dev_err(pci->dev, "Write DBI address failed\n");
 165 }
 166 EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
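/*
 * Callers normally use the fixed-width wrappers from pcie-designware.h
 * (dw_pcie_readl_dbi()/dw_pcie_writel_dbi() and the w/b variants), which
 * supply the size argument. A read-modify-write of a port logic register
 * then looks like (sketch):
 *
 *	u32 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
 *
 *	val &= ~PORT_LINK_MODE_MASK;
 *	val |= PORT_LINK_MODE_1_LANES;
 *	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
 */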
 167 
 168 u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
 169 {
 170         int ret;
 171         u32 val;
 172 
 173         if (pci->ops->read_dbi2)
 174                 return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);
 175 
 176         ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
 177         if (ret)
  178                 dev_err(pci->dev, "Read DBI address failed\n");
 179 
 180         return val;
 181 }
 182 
 183 void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
 184 {
 185         int ret;
 186 
 187         if (pci->ops->write_dbi2) {
 188                 pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
 189                 return;
 190         }
 191 
 192         ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
 193         if (ret)
  194                 dev_err(pci->dev, "Write DBI address failed\n");
 195 }
 196 
 197 u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
 198 {
 199         int ret;
 200         u32 val;
 201 
 202         if (pci->ops->read_dbi)
 203                 return pci->ops->read_dbi(pci, pci->atu_base, reg, size);
 204 
 205         ret = dw_pcie_read(pci->atu_base + reg, size, &val);
 206         if (ret)
 207                 dev_err(pci->dev, "Read ATU address failed\n");
 208 
 209         return val;
 210 }
 211 
 212 void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
 213 {
 214         int ret;
 215 
 216         if (pci->ops->write_dbi) {
 217                 pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
 218                 return;
 219         }
 220 
 221         ret = dw_pcie_write(pci->atu_base + reg, size, val);
 222         if (ret)
 223                 dev_err(pci->dev, "Write ATU address failed\n");
 224 }
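/*
 * The 32-bit wrappers dw_pcie_readl_atu()/dw_pcie_writel_atu(), declared
 * alongside these accessors in the driver header, pass a fixed size of 4;
 * the "unroll" helpers below use them to reach the per-region iATU register
 * blocks.
 */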
 225 
 226 static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
 227 {
 228         u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 229 
 230         return dw_pcie_readl_atu(pci, offset + reg);
 231 }
 232 
 233 static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
 234                                      u32 val)
 235 {
 236         u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
 237 
 238         dw_pcie_writel_atu(pci, offset + reg, val);
 239 }
 240 
 241 static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
 242                                              int type, u64 cpu_addr,
 243                                              u64 pci_addr, u32 size)
 244 {
 245         u32 retries, val;
 246 
 247         dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
 248                                  lower_32_bits(cpu_addr));
 249         dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
 250                                  upper_32_bits(cpu_addr));
 251         dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
 252                                  lower_32_bits(cpu_addr + size - 1));
 253         dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
 254                                  lower_32_bits(pci_addr));
 255         dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
 256                                  upper_32_bits(pci_addr));
 257         dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
 258                                  type);
 259         dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
 260                                  PCIE_ATU_ENABLE);
 261 
 262         /*
 263          * Make sure ATU enable takes effect before any subsequent config
 264          * and I/O accesses.
 265          */
 266         for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
 267                 val = dw_pcie_readl_ob_unroll(pci, index,
 268                                               PCIE_ATU_UNR_REGION_CTRL2);
 269                 if (val & PCIE_ATU_ENABLE)
 270                         return;
 271 
 272                 mdelay(LINK_WAIT_IATU);
 273         }
 274         dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 275 }
 276 
 277 void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
 278                                u64 cpu_addr, u64 pci_addr, u32 size)
 279 {
 280         u32 retries, val;
 281 
 282         if (pci->ops->cpu_addr_fixup)
 283                 cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
 284 
 285         if (pci->iatu_unroll_enabled) {
 286                 dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
 287                                                  pci_addr, size);
 288                 return;
 289         }
 290 
 291         dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
 292                            PCIE_ATU_REGION_OUTBOUND | index);
 293         dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
 294                            lower_32_bits(cpu_addr));
 295         dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
 296                            upper_32_bits(cpu_addr));
 297         dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
 298                            lower_32_bits(cpu_addr + size - 1));
 299         dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
 300                            lower_32_bits(pci_addr));
 301         dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
 302                            upper_32_bits(pci_addr));
 303         dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
 304         dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
 305 
 306         /*
 307          * Make sure ATU enable takes effect before any subsequent config
 308          * and I/O accesses.
 309          */
 310         for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
 311                 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
 312                 if (val & PCIE_ATU_ENABLE)
 313                         return;
 314 
 315                 mdelay(LINK_WAIT_IATU);
 316         }
 317         dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 318 }
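/*
 * Example (sketch; mem_base, mem_bus_addr and mem_size are placeholders for
 * the caller's resources): mapping an outbound memory window so CPU accesses
 * starting at mem_base are translated to PCI bus addresses at mem_bus_addr:
 *
 *	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
 *				  PCIE_ATU_TYPE_MEM, mem_base,
 *				  mem_bus_addr, mem_size);
 */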
 319 
 320 static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
 321 {
 322         u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
 323 
 324         return dw_pcie_readl_atu(pci, offset + reg);
 325 }
 326 
 327 static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
 328                                      u32 val)
 329 {
 330         u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
 331 
 332         dw_pcie_writel_atu(pci, offset + reg, val);
 333 }
 334 
 335 static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
 336                                            int bar, u64 cpu_addr,
 337                                            enum dw_pcie_as_type as_type)
 338 {
 339         int type;
 340         u32 retries, val;
 341 
 342         dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
 343                                  lower_32_bits(cpu_addr));
 344         dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
 345                                  upper_32_bits(cpu_addr));
 346 
 347         switch (as_type) {
 348         case DW_PCIE_AS_MEM:
 349                 type = PCIE_ATU_TYPE_MEM;
 350                 break;
 351         case DW_PCIE_AS_IO:
 352                 type = PCIE_ATU_TYPE_IO;
 353                 break;
 354         default:
 355                 return -EINVAL;
 356         }
 357 
 358         dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
 359         dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
 360                                  PCIE_ATU_ENABLE |
 361                                  PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
 362 
 363         /*
 364          * Make sure ATU enable takes effect before any subsequent config
 365          * and I/O accesses.
 366          */
 367         for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
 368                 val = dw_pcie_readl_ib_unroll(pci, index,
 369                                               PCIE_ATU_UNR_REGION_CTRL2);
 370                 if (val & PCIE_ATU_ENABLE)
 371                         return 0;
 372 
 373                 mdelay(LINK_WAIT_IATU);
 374         }
 375         dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 376 
 377         return -EBUSY;
 378 }
 379 
 380 int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
 381                              u64 cpu_addr, enum dw_pcie_as_type as_type)
 382 {
 383         int type;
 384         u32 retries, val;
 385 
 386         if (pci->iatu_unroll_enabled)
 387                 return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
 388                                                        cpu_addr, as_type);
 389 
 390         dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
 391                            index);
 392         dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
 393         dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
 394 
 395         switch (as_type) {
 396         case DW_PCIE_AS_MEM:
 397                 type = PCIE_ATU_TYPE_MEM;
 398                 break;
 399         case DW_PCIE_AS_IO:
 400                 type = PCIE_ATU_TYPE_IO;
 401                 break;
 402         default:
 403                 return -EINVAL;
 404         }
 405 
 406         dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
 407         dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
 408                            | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
 409 
 410         /*
 411          * Make sure ATU enable takes effect before any subsequent config
 412          * and I/O accesses.
 413          */
 414         for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
 415                 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
 416                 if (val & PCIE_ATU_ENABLE)
 417                         return 0;
 418 
 419                 mdelay(LINK_WAIT_IATU);
 420         }
 421         dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 422 
 423         return -EBUSY;
 424 }
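/*
 * Example (sketch; phys_addr is a placeholder): an endpoint maps window
 * index 0 to BAR 0 inbound, so host accesses to that BAR land in local
 * memory at phys_addr:
 *
 *	ret = dw_pcie_prog_inbound_atu(pci, 0, 0, phys_addr, DW_PCIE_AS_MEM);
 *	if (ret)
 *		dev_err(pci->dev, "failed to program inbound iATU\n");
 */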
 425 
 426 void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
 427                          enum dw_pcie_region_type type)
 428 {
 429         int region;
 430 
 431         switch (type) {
 432         case DW_PCIE_REGION_INBOUND:
 433                 region = PCIE_ATU_REGION_INBOUND;
 434                 break;
 435         case DW_PCIE_REGION_OUTBOUND:
 436                 region = PCIE_ATU_REGION_OUTBOUND;
 437                 break;
 438         default:
 439                 return;
 440         }
 441 
 442         dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
 443         dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
 444 }
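/*
 * Example (sketch): releasing the window programmed above once the BAR is
 * torn down:
 *
 *	dw_pcie_disable_atu(pci, 0, DW_PCIE_REGION_INBOUND);
 */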
 445 
 446 int dw_pcie_wait_for_link(struct dw_pcie *pci)
 447 {
 448         int retries;
 449 
 450         /* Check if the link is up or not */
 451         for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
 452                 if (dw_pcie_link_up(pci)) {
 453                         dev_info(pci->dev, "Link up\n");
 454                         return 0;
 455                 }
 456                 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
 457         }
 458 
 459         dev_info(pci->dev, "Phy link never came up\n");
 460 
 461         return -ETIMEDOUT;
 462 }
 463 EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
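/*
 * Glue drivers typically trigger link training through their own
 * ops->start_link() hook and then poll here, e.g. (sketch):
 *
 *	if (pci->ops->start_link)
 *		pci->ops->start_link(pci);
 *	if (dw_pcie_wait_for_link(pci))
 *		dev_warn(pci->dev, "PCIe link did not come up\n");
 */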
 464 
 465 int dw_pcie_link_up(struct dw_pcie *pci)
 466 {
 467         u32 val;
 468 
 469         if (pci->ops->link_up)
 470                 return pci->ops->link_up(pci);
 471 
 472         val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
 473         return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
 474                 (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
 475 }
 476 
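/*
 * Cores with the "unrolled" iATU (4.80a and newer) do not implement the
 * legacy viewport register, so it reads back as all ones; that is what this
 * probe relies on when the core version is not known in advance.
 */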
 477 static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
 478 {
 479         u32 val;
 480 
 481         val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
 482         if (val == 0xffffffff)
 483                 return 1;
 484 
 485         return 0;
 486 }
 487 
 488 void dw_pcie_setup(struct dw_pcie *pci)
 489 {
 490         int ret;
 491         u32 val;
 492         u32 lanes;
 493         struct device *dev = pci->dev;
 494         struct device_node *np = dev->of_node;
 495 
 496         if (pci->version >= 0x480A || (!pci->version &&
 497                                        dw_pcie_iatu_unroll_enabled(pci))) {
 498                 pci->iatu_unroll_enabled = true;
 499                 if (!pci->atu_base)
 500                         pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
 501         }
 502         dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
 503                 "enabled" : "disabled");
 504 
 505 
 506         ret = of_property_read_u32(np, "num-lanes", &lanes);
 507         if (ret) {
 508                 dev_dbg(pci->dev, "property num-lanes isn't found\n");
 509                 return;
 510         }
 511 
 512         /* Set the number of lanes */
 513         val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
 514         val &= ~PORT_LINK_MODE_MASK;
 515         switch (lanes) {
 516         case 1:
 517                 val |= PORT_LINK_MODE_1_LANES;
 518                 break;
 519         case 2:
 520                 val |= PORT_LINK_MODE_2_LANES;
 521                 break;
 522         case 4:
 523                 val |= PORT_LINK_MODE_4_LANES;
 524                 break;
 525         case 8:
 526                 val |= PORT_LINK_MODE_8_LANES;
 527                 break;
 528         default:
 529                 dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
 530                 return;
 531         }
 532         dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
 533 
 534         /* Set link width speed control register */
 535         val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
 536         val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
 537         switch (lanes) {
 538         case 1:
 539                 val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
 540                 break;
 541         case 2:
 542                 val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
 543                 break;
 544         case 4:
 545                 val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
 546                 break;
 547         case 8:
 548                 val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
 549                 break;
 550         }
 551         dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
 552 
 553         if (of_property_read_bool(np, "snps,enable-cdm-check")) {
 554                 val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
 555                 val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
 556                        PCIE_PL_CHK_REG_CHK_REG_START;
 557                 dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
 558         }
 559 }
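/*
 * The optional properties consumed above come from the controller's device
 * tree node; an illustrative fragment (the node name, compatible string and
 * the remaining required properties depend on the platform binding):
 *
 *	pcie@40000000 {
 *		...
 *		num-lanes = <4>;
 *		snps,enable-cdm-check;
 *	};
 */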
