drivers/spi/spi.c

DEFINITIONS

This source file includes the following definitions.
  1. spidev_release
  2. modalias_show
  3. driver_override_store
  4. driver_override_show
  5. spi_statistics_add_transfer_stats
  6. spi_match_id
  7. spi_get_device_id
  8. spi_match_device
  9. spi_uevent
  10. spi_drv_probe
  11. spi_drv_remove
  12. spi_drv_shutdown
  13. __spi_register_driver
  14. spi_alloc_device
  15. spi_dev_set_name
  16. spi_dev_check
  17. spi_add_device
  18. spi_new_device
  19. spi_unregister_device
  20. spi_match_controller_to_boardinfo
  21. spi_register_board_info
  22. spi_set_cs
  23. spi_map_buf
  24. spi_unmap_buf
  25. __spi_map_msg
  26. __spi_unmap_msg
  27. __spi_map_msg
  28. __spi_unmap_msg
  29. spi_unmap_msg
  30. spi_map_msg
  31. spi_transfer_wait
  32. _spi_transfer_delay_ns
  33. _spi_transfer_cs_change_delay
  34. spi_transfer_one_message
  35. spi_finalize_current_transfer
  36. __spi_pump_messages
  37. spi_pump_messages
  38. spi_set_thread_rt
  39. spi_init_queue
  40. spi_get_next_queued_message
  41. spi_finalize_current_message
  42. spi_start_queue
  43. spi_stop_queue
  44. spi_destroy_queue
  45. __spi_queued_transfer
  46. spi_queued_transfer
  47. spi_controller_initialize_queue
  48. spi_flush_queue
  49. of_spi_parse_dt
  50. of_register_spi_device
  51. of_register_spi_devices
  52. of_register_spi_devices
  53. acpi_spi_parse_apple_properties
  54. acpi_spi_add_resource
  55. acpi_register_spi_device
  56. acpi_spi_add_device
  57. acpi_register_spi_devices
  58. acpi_register_spi_devices
  59. spi_controller_release
  60. spi_slave_abort
  61. match_true
  62. slave_show
  63. slave_store
  64. __spi_alloc_controller
  65. of_spi_get_gpio_numbers
  66. of_spi_get_gpio_numbers
  67. spi_get_gpio_descs
  68. spi_controller_check_ops
  69. spi_register_controller
  70. devm_spi_unregister
  71. devm_spi_register_controller
  72. __unregister
  73. spi_unregister_controller
  74. spi_controller_suspend
  75. spi_controller_resume
  76. __spi_controller_match
  77. spi_busnum_to_master
  78. spi_res_alloc
  79. spi_res_free
  80. spi_res_add
  81. spi_res_release
  82. __spi_replace_transfers_release
  83. spi_replace_transfers
  84. __spi_split_transfer_maxsize
  85. spi_split_transfers_maxsize
  86. __spi_validate_bits_per_word
  87. spi_setup
  88. spi_set_cs_timing
  89. __spi_validate
  90. __spi_async
  91. spi_async
  92. spi_async_locked
  93. spi_complete
  94. __spi_sync
  95. spi_sync
  96. spi_sync_locked
  97. spi_bus_lock
  98. spi_bus_unlock
  99. spi_write_then_read
  100. of_find_spi_device_by_node
  101. of_find_spi_controller_by_node
  102. of_spi_notify
  103. spi_acpi_controller_match
  104. acpi_spi_find_controller_by_adev
  105. acpi_spi_find_device_by_adev
  106. acpi_spi_notify
  107. spi_init

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 // SPI init/core code
   3 //
   4 // Copyright (C) 2005 David Brownell
   5 // Copyright (C) 2008 Secret Lab Technologies Ltd.
   6 
   7 #include <linux/kernel.h>
   8 #include <linux/device.h>
   9 #include <linux/init.h>
  10 #include <linux/cache.h>
  11 #include <linux/dma-mapping.h>
  12 #include <linux/dmaengine.h>
  13 #include <linux/mutex.h>
  14 #include <linux/of_device.h>
  15 #include <linux/of_irq.h>
  16 #include <linux/clk/clk-conf.h>
  17 #include <linux/slab.h>
  18 #include <linux/mod_devicetable.h>
  19 #include <linux/spi/spi.h>
  20 #include <linux/spi/spi-mem.h>
  21 #include <linux/of_gpio.h>
  22 #include <linux/gpio/consumer.h>
  23 #include <linux/pm_runtime.h>
  24 #include <linux/pm_domain.h>
  25 #include <linux/property.h>
  26 #include <linux/export.h>
  27 #include <linux/sched/rt.h>
  28 #include <uapi/linux/sched/types.h>
  29 #include <linux/delay.h>
  30 #include <linux/kthread.h>
  31 #include <linux/ioport.h>
  32 #include <linux/acpi.h>
  33 #include <linux/highmem.h>
  34 #include <linux/idr.h>
  35 #include <linux/platform_data/x86/apple.h>
  36 
  37 #define CREATE_TRACE_POINTS
  38 #include <trace/events/spi.h>
  39 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
  40 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
  41 
  42 #include "internals.h"
  43 
  44 static DEFINE_IDR(spi_master_idr);
  45 
  46 static void spidev_release(struct device *dev)
  47 {
  48         struct spi_device       *spi = to_spi_device(dev);
  49 
  50         /* spi controllers may cleanup for released devices */
  51         if (spi->controller->cleanup)
  52                 spi->controller->cleanup(spi);
  53 
  54         spi_controller_put(spi->controller);
  55         kfree(spi->driver_override);
  56         kfree(spi);
  57 }
  58 
  59 static ssize_t
  60 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
  61 {
  62         const struct spi_device *spi = to_spi_device(dev);
  63         int len;
  64 
  65         len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
  66         if (len != -ENODEV)
  67                 return len;
  68 
  69         return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
  70 }
  71 static DEVICE_ATTR_RO(modalias);
  72 
  73 static ssize_t driver_override_store(struct device *dev,
  74                                      struct device_attribute *a,
  75                                      const char *buf, size_t count)
  76 {
  77         struct spi_device *spi = to_spi_device(dev);
  78         const char *end = memchr(buf, '\n', count);
  79         const size_t len = end ? end - buf : count;
  80         const char *driver_override, *old;
  81 
  82         /* We need to keep extra room for a newline when displaying value */
  83         if (len >= (PAGE_SIZE - 1))
  84                 return -EINVAL;
  85 
  86         driver_override = kstrndup(buf, len, GFP_KERNEL);
  87         if (!driver_override)
  88                 return -ENOMEM;
  89 
  90         device_lock(dev);
  91         old = spi->driver_override;
  92         if (len) {
  93                 spi->driver_override = driver_override;
  94         } else {
   95                 /* Empty string, disable driver override */
  96                 spi->driver_override = NULL;
  97                 kfree(driver_override);
  98         }
  99         device_unlock(dev);
 100         kfree(old);
 101 
 102         return count;
 103 }
 104 
 105 static ssize_t driver_override_show(struct device *dev,
 106                                     struct device_attribute *a, char *buf)
 107 {
 108         const struct spi_device *spi = to_spi_device(dev);
 109         ssize_t len;
 110 
 111         device_lock(dev);
 112         len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
 113         device_unlock(dev);
 114         return len;
 115 }
 116 static DEVICE_ATTR_RW(driver_override);
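
/*
 * Editorial usage note (not part of the original file): writing a driver
 * name to the driver_override attribute restricts matching to exactly that
 * driver (see spi_match_device() below).  For a hypothetical device spi0.0,
 * writing "spidev" to /sys/bus/spi/devices/spi0.0/driver_override and then
 * rebinding the device makes only the spidev driver eligible; writing an
 * empty string clears the override again.
 */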
 117 
 118 #define SPI_STATISTICS_ATTRS(field, file)                               \
 119 static ssize_t spi_controller_##field##_show(struct device *dev,        \
 120                                              struct device_attribute *attr, \
 121                                              char *buf)                 \
 122 {                                                                       \
 123         struct spi_controller *ctlr = container_of(dev,                 \
 124                                          struct spi_controller, dev);   \
 125         return spi_statistics_##field##_show(&ctlr->statistics, buf);   \
 126 }                                                                       \
 127 static struct device_attribute dev_attr_spi_controller_##field = {      \
 128         .attr = { .name = file, .mode = 0444 },                         \
 129         .show = spi_controller_##field##_show,                          \
 130 };                                                                      \
 131 static ssize_t spi_device_##field##_show(struct device *dev,            \
 132                                          struct device_attribute *attr, \
 133                                         char *buf)                      \
 134 {                                                                       \
 135         struct spi_device *spi = to_spi_device(dev);                    \
 136         return spi_statistics_##field##_show(&spi->statistics, buf);    \
 137 }                                                                       \
 138 static struct device_attribute dev_attr_spi_device_##field = {          \
 139         .attr = { .name = file, .mode = 0444 },                         \
 140         .show = spi_device_##field##_show,                              \
 141 }
 142 
 143 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)      \
 144 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
 145                                             char *buf)                  \
 146 {                                                                       \
 147         unsigned long flags;                                            \
 148         ssize_t len;                                                    \
 149         spin_lock_irqsave(&stat->lock, flags);                          \
 150         len = sprintf(buf, format_string, stat->field);                 \
 151         spin_unlock_irqrestore(&stat->lock, flags);                     \
 152         return len;                                                     \
 153 }                                                                       \
 154 SPI_STATISTICS_ATTRS(name, file)
 155 
 156 #define SPI_STATISTICS_SHOW(field, format_string)                       \
 157         SPI_STATISTICS_SHOW_NAME(field, __stringify(field),             \
 158                                  field, format_string)
 159 
 160 SPI_STATISTICS_SHOW(messages, "%lu");
 161 SPI_STATISTICS_SHOW(transfers, "%lu");
 162 SPI_STATISTICS_SHOW(errors, "%lu");
 163 SPI_STATISTICS_SHOW(timedout, "%lu");
 164 
 165 SPI_STATISTICS_SHOW(spi_sync, "%lu");
 166 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
 167 SPI_STATISTICS_SHOW(spi_async, "%lu");
 168 
 169 SPI_STATISTICS_SHOW(bytes, "%llu");
 170 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
 171 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
 172 
 173 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)              \
 174         SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,           \
 175                                  "transfer_bytes_histo_" number,        \
 176                                  transfer_bytes_histo[index],  "%lu")
 177 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
 178 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
 179 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
 180 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
 181 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
 182 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
 183 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
 184 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
 185 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
 186 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
 187 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
 188 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
 189 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
 190 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
 191 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
 192 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
 193 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
 194 
 195 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
 196 
 197 static struct attribute *spi_dev_attrs[] = {
 198         &dev_attr_modalias.attr,
 199         &dev_attr_driver_override.attr,
 200         NULL,
 201 };
 202 
 203 static const struct attribute_group spi_dev_group = {
 204         .attrs  = spi_dev_attrs,
 205 };
 206 
 207 static struct attribute *spi_device_statistics_attrs[] = {
 208         &dev_attr_spi_device_messages.attr,
 209         &dev_attr_spi_device_transfers.attr,
 210         &dev_attr_spi_device_errors.attr,
 211         &dev_attr_spi_device_timedout.attr,
 212         &dev_attr_spi_device_spi_sync.attr,
 213         &dev_attr_spi_device_spi_sync_immediate.attr,
 214         &dev_attr_spi_device_spi_async.attr,
 215         &dev_attr_spi_device_bytes.attr,
 216         &dev_attr_spi_device_bytes_rx.attr,
 217         &dev_attr_spi_device_bytes_tx.attr,
 218         &dev_attr_spi_device_transfer_bytes_histo0.attr,
 219         &dev_attr_spi_device_transfer_bytes_histo1.attr,
 220         &dev_attr_spi_device_transfer_bytes_histo2.attr,
 221         &dev_attr_spi_device_transfer_bytes_histo3.attr,
 222         &dev_attr_spi_device_transfer_bytes_histo4.attr,
 223         &dev_attr_spi_device_transfer_bytes_histo5.attr,
 224         &dev_attr_spi_device_transfer_bytes_histo6.attr,
 225         &dev_attr_spi_device_transfer_bytes_histo7.attr,
 226         &dev_attr_spi_device_transfer_bytes_histo8.attr,
 227         &dev_attr_spi_device_transfer_bytes_histo9.attr,
 228         &dev_attr_spi_device_transfer_bytes_histo10.attr,
 229         &dev_attr_spi_device_transfer_bytes_histo11.attr,
 230         &dev_attr_spi_device_transfer_bytes_histo12.attr,
 231         &dev_attr_spi_device_transfer_bytes_histo13.attr,
 232         &dev_attr_spi_device_transfer_bytes_histo14.attr,
 233         &dev_attr_spi_device_transfer_bytes_histo15.attr,
 234         &dev_attr_spi_device_transfer_bytes_histo16.attr,
 235         &dev_attr_spi_device_transfers_split_maxsize.attr,
 236         NULL,
 237 };
 238 
 239 static const struct attribute_group spi_device_statistics_group = {
 240         .name  = "statistics",
 241         .attrs  = spi_device_statistics_attrs,
 242 };
 243 
 244 static const struct attribute_group *spi_dev_groups[] = {
 245         &spi_dev_group,
 246         &spi_device_statistics_group,
 247         NULL,
 248 };
 249 
 250 static struct attribute *spi_controller_statistics_attrs[] = {
 251         &dev_attr_spi_controller_messages.attr,
 252         &dev_attr_spi_controller_transfers.attr,
 253         &dev_attr_spi_controller_errors.attr,
 254         &dev_attr_spi_controller_timedout.attr,
 255         &dev_attr_spi_controller_spi_sync.attr,
 256         &dev_attr_spi_controller_spi_sync_immediate.attr,
 257         &dev_attr_spi_controller_spi_async.attr,
 258         &dev_attr_spi_controller_bytes.attr,
 259         &dev_attr_spi_controller_bytes_rx.attr,
 260         &dev_attr_spi_controller_bytes_tx.attr,
 261         &dev_attr_spi_controller_transfer_bytes_histo0.attr,
 262         &dev_attr_spi_controller_transfer_bytes_histo1.attr,
 263         &dev_attr_spi_controller_transfer_bytes_histo2.attr,
 264         &dev_attr_spi_controller_transfer_bytes_histo3.attr,
 265         &dev_attr_spi_controller_transfer_bytes_histo4.attr,
 266         &dev_attr_spi_controller_transfer_bytes_histo5.attr,
 267         &dev_attr_spi_controller_transfer_bytes_histo6.attr,
 268         &dev_attr_spi_controller_transfer_bytes_histo7.attr,
 269         &dev_attr_spi_controller_transfer_bytes_histo8.attr,
 270         &dev_attr_spi_controller_transfer_bytes_histo9.attr,
 271         &dev_attr_spi_controller_transfer_bytes_histo10.attr,
 272         &dev_attr_spi_controller_transfer_bytes_histo11.attr,
 273         &dev_attr_spi_controller_transfer_bytes_histo12.attr,
 274         &dev_attr_spi_controller_transfer_bytes_histo13.attr,
 275         &dev_attr_spi_controller_transfer_bytes_histo14.attr,
 276         &dev_attr_spi_controller_transfer_bytes_histo15.attr,
 277         &dev_attr_spi_controller_transfer_bytes_histo16.attr,
 278         &dev_attr_spi_controller_transfers_split_maxsize.attr,
 279         NULL,
 280 };
 281 
 282 static const struct attribute_group spi_controller_statistics_group = {
 283         .name  = "statistics",
 284         .attrs  = spi_controller_statistics_attrs,
 285 };
 286 
 287 static const struct attribute_group *spi_master_groups[] = {
 288         &spi_controller_statistics_group,
 289         NULL,
 290 };
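
/*
 * Editorial note: the "statistics" attribute groups above expose the
 * counters declared via the SPI_STATISTICS_* macros in sysfs, once per
 * controller and once per client device.  For a hypothetical bus 0 the
 * files would appear roughly as
 * /sys/class/spi_master/spi0/statistics/{messages,transfers,bytes,...}
 * and /sys/bus/spi/devices/spi0.0/statistics/{...}.
 */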
 291 
 292 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
 293                                        struct spi_transfer *xfer,
 294                                        struct spi_controller *ctlr)
 295 {
 296         unsigned long flags;
 297         int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
 298 
 299         if (l2len < 0)
 300                 l2len = 0;
 301 
 302         spin_lock_irqsave(&stats->lock, flags);
 303 
 304         stats->transfers++;
 305         stats->transfer_bytes_histo[l2len]++;
 306 
 307         stats->bytes += xfer->len;
 308         if ((xfer->tx_buf) &&
 309             (xfer->tx_buf != ctlr->dummy_tx))
 310                 stats->bytes_tx += xfer->len;
 311         if ((xfer->rx_buf) &&
 312             (xfer->rx_buf != ctlr->dummy_rx))
 313                 stats->bytes_rx += xfer->len;
 314 
 315         spin_unlock_irqrestore(&stats->lock, flags);
 316 }
 317 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
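
/*
 * Worked example (editorial): the histogram bucket above is the index of
 * the highest set bit of the transfer length, capped at the last bucket.
 * A transfer of 100 bytes gives fls(100) == 7, so l2len == 6 and the
 * "64-127" counter is incremented; a zero-length transfer is clamped into
 * "0-1", and anything of 65536 bytes or more lands in "65536+".
 */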
 318 
 319 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 320  * and the sysfs version makes coldplug work too.
 321  */
 322 
 323 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
 324                                                 const struct spi_device *sdev)
 325 {
 326         while (id->name[0]) {
 327                 if (!strcmp(sdev->modalias, id->name))
 328                         return id;
 329                 id++;
 330         }
 331         return NULL;
 332 }
 333 
 334 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
 335 {
 336         const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
 337 
 338         return spi_match_id(sdrv->id_table, sdev);
 339 }
 340 EXPORT_SYMBOL_GPL(spi_get_device_id);
 341 
 342 static int spi_match_device(struct device *dev, struct device_driver *drv)
 343 {
 344         const struct spi_device *spi = to_spi_device(dev);
 345         const struct spi_driver *sdrv = to_spi_driver(drv);
 346 
 347         /* Check override first, and if set, only use the named driver */
 348         if (spi->driver_override)
 349                 return strcmp(spi->driver_override, drv->name) == 0;
 350 
 351         /* Attempt an OF style match */
 352         if (of_driver_match_device(dev, drv))
 353                 return 1;
 354 
 355         /* Then try ACPI */
 356         if (acpi_driver_match_device(dev, drv))
 357                 return 1;
 358 
 359         if (sdrv->id_table)
 360                 return !!spi_match_id(sdrv->id_table, spi);
 361 
 362         return strcmp(spi->modalias, drv->name) == 0;
 363 }
 364 
 365 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
 366 {
 367         const struct spi_device         *spi = to_spi_device(dev);
 368         int rc;
 369 
 370         rc = acpi_device_uevent_modalias(dev, env);
 371         if (rc != -ENODEV)
 372                 return rc;
 373 
 374         return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
 375 }
 376 
 377 struct bus_type spi_bus_type = {
 378         .name           = "spi",
 379         .dev_groups     = spi_dev_groups,
 380         .match          = spi_match_device,
 381         .uevent         = spi_uevent,
 382 };
 383 EXPORT_SYMBOL_GPL(spi_bus_type);
 384 
 385 
 386 static int spi_drv_probe(struct device *dev)
 387 {
 388         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 389         struct spi_device               *spi = to_spi_device(dev);
 390         int ret;
 391 
 392         ret = of_clk_set_defaults(dev->of_node, false);
 393         if (ret)
 394                 return ret;
 395 
 396         if (dev->of_node) {
 397                 spi->irq = of_irq_get(dev->of_node, 0);
 398                 if (spi->irq == -EPROBE_DEFER)
 399                         return -EPROBE_DEFER;
 400                 if (spi->irq < 0)
 401                         spi->irq = 0;
 402         }
 403 
 404         ret = dev_pm_domain_attach(dev, true);
 405         if (ret)
 406                 return ret;
 407 
 408         ret = sdrv->probe(spi);
 409         if (ret)
 410                 dev_pm_domain_detach(dev, true);
 411 
 412         return ret;
 413 }
 414 
 415 static int spi_drv_remove(struct device *dev)
 416 {
 417         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 418         int ret;
 419 
 420         ret = sdrv->remove(to_spi_device(dev));
 421         dev_pm_domain_detach(dev, true);
 422 
 423         return ret;
 424 }
 425 
 426 static void spi_drv_shutdown(struct device *dev)
 427 {
 428         const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 429 
 430         sdrv->shutdown(to_spi_device(dev));
 431 }
 432 
 433 /**
 434  * __spi_register_driver - register a SPI driver
 435  * @owner: owner module of the driver to register
 436  * @sdrv: the driver to register
 437  * Context: can sleep
 438  *
 439  * Return: zero on success, else a negative error code.
 440  */
 441 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
 442 {
 443         sdrv->driver.owner = owner;
 444         sdrv->driver.bus = &spi_bus_type;
 445         if (sdrv->probe)
 446                 sdrv->driver.probe = spi_drv_probe;
 447         if (sdrv->remove)
 448                 sdrv->driver.remove = spi_drv_remove;
 449         if (sdrv->shutdown)
 450                 sdrv->driver.shutdown = spi_drv_shutdown;
 451         return driver_register(&sdrv->driver);
 452 }
 453 EXPORT_SYMBOL_GPL(__spi_register_driver);
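
/*
 * Illustrative sketch (editorial, not from the original file): client
 * drivers normally use the spi_register_driver()/module_spi_driver()
 * helpers, which pass THIS_MODULE as @owner to this function.  A minimal
 * (hypothetical) "foo" driver could look like:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->bits_per_word = 8;
 *		return spi_setup(spi);
 *	}
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.probe	= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */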
 454 
 455 /*-------------------------------------------------------------------------*/
 456 
 457 /* SPI devices should normally not be created by SPI device drivers; that
 458  * would make them board-specific.  Similarly with SPI controller drivers.
  459  * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 460  * with other readonly (flashable) information about mainboard devices.
 461  */
 462 
 463 struct boardinfo {
 464         struct list_head        list;
 465         struct spi_board_info   board_info;
 466 };
 467 
 468 static LIST_HEAD(board_list);
 469 static LIST_HEAD(spi_controller_list);
 470 
 471 /*
  472  * Used to protect add/del operations on the board_info list and the
  473  * spi_controller list, and their matching process; it is also used to
  474  * protect objects of type struct idr.
 475  */
 476 static DEFINE_MUTEX(board_lock);
 477 
 478 /**
 479  * spi_alloc_device - Allocate a new SPI device
 480  * @ctlr: Controller to which device is connected
 481  * Context: can sleep
 482  *
 483  * Allows a driver to allocate and initialize a spi_device without
 484  * registering it immediately.  This allows a driver to directly
 485  * fill the spi_device with device parameters before calling
 486  * spi_add_device() on it.
 487  *
 488  * Caller is responsible to call spi_add_device() on the returned
 489  * spi_device structure to add it to the SPI controller.  If the caller
 490  * needs to discard the spi_device without adding it, then it should
 491  * call spi_dev_put() on it.
 492  *
 493  * Return: a pointer to the new device, or NULL.
 494  */
 495 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
 496 {
 497         struct spi_device       *spi;
 498 
 499         if (!spi_controller_get(ctlr))
 500                 return NULL;
 501 
 502         spi = kzalloc(sizeof(*spi), GFP_KERNEL);
 503         if (!spi) {
 504                 spi_controller_put(ctlr);
 505                 return NULL;
 506         }
 507 
 508         spi->master = spi->controller = ctlr;
 509         spi->dev.parent = &ctlr->dev;
 510         spi->dev.bus = &spi_bus_type;
 511         spi->dev.release = spidev_release;
 512         spi->cs_gpio = -ENOENT;
 513 
 514         spin_lock_init(&spi->statistics.lock);
 515 
 516         device_initialize(&spi->dev);
 517         return spi;
 518 }
 519 EXPORT_SYMBOL_GPL(spi_alloc_device);
 520 
 521 static void spi_dev_set_name(struct spi_device *spi)
 522 {
 523         struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
 524 
 525         if (adev) {
 526                 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
 527                 return;
 528         }
 529 
 530         dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
 531                      spi->chip_select);
 532 }
 533 
 534 static int spi_dev_check(struct device *dev, void *data)
 535 {
 536         struct spi_device *spi = to_spi_device(dev);
 537         struct spi_device *new_spi = data;
 538 
 539         if (spi->controller == new_spi->controller &&
 540             spi->chip_select == new_spi->chip_select)
 541                 return -EBUSY;
 542         return 0;
 543 }
 544 
 545 /**
 546  * spi_add_device - Add spi_device allocated with spi_alloc_device
 547  * @spi: spi_device to register
 548  *
 549  * Companion function to spi_alloc_device.  Devices allocated with
 550  * spi_alloc_device can be added onto the spi bus with this function.
 551  *
 552  * Return: 0 on success; negative errno on failure
 553  */
 554 int spi_add_device(struct spi_device *spi)
 555 {
 556         static DEFINE_MUTEX(spi_add_lock);
 557         struct spi_controller *ctlr = spi->controller;
 558         struct device *dev = ctlr->dev.parent;
 559         int status;
 560 
 561         /* Chipselects are numbered 0..max; validate. */
 562         if (spi->chip_select >= ctlr->num_chipselect) {
 563                 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
 564                         ctlr->num_chipselect);
 565                 return -EINVAL;
 566         }
 567 
 568         /* Set the bus ID string */
 569         spi_dev_set_name(spi);
 570 
 571         /* We need to make sure there's no other device with this
 572          * chipselect **BEFORE** we call setup(), else we'll trash
 573          * its configuration.  Lock against concurrent add() calls.
 574          */
 575         mutex_lock(&spi_add_lock);
 576 
 577         status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
 578         if (status) {
 579                 dev_err(dev, "chipselect %d already in use\n",
 580                                 spi->chip_select);
 581                 goto done;
 582         }
 583 
 584         /* Descriptors take precedence */
 585         if (ctlr->cs_gpiods)
 586                 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
 587         else if (ctlr->cs_gpios)
 588                 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
 589 
 590         /* Drivers may modify this initial i/o setup, but will
 591          * normally rely on the device being setup.  Devices
 592          * using SPI_CS_HIGH can't coexist well otherwise...
 593          */
 594         status = spi_setup(spi);
 595         if (status < 0) {
 596                 dev_err(dev, "can't setup %s, status %d\n",
 597                                 dev_name(&spi->dev), status);
 598                 goto done;
 599         }
 600 
 601         /* Device may be bound to an active driver when this returns */
 602         status = device_add(&spi->dev);
 603         if (status < 0)
 604                 dev_err(dev, "can't add %s, status %d\n",
 605                                 dev_name(&spi->dev), status);
 606         else
 607                 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
 608 
 609 done:
 610         mutex_unlock(&spi_add_lock);
 611         return status;
 612 }
 613 EXPORT_SYMBOL_GPL(spi_add_device);
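
/*
 * Illustrative sketch (editorial): the usual pattern for the two calls
 * above, e.g. in a driver that discovers a chip at runtime; on failure the
 * device must be discarded with spi_dev_put(), as noted in the
 * spi_alloc_device() documentation (all names and values below are
 * hypothetical):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	int ret;
 *
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_0;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 *	return ret;
 */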
 614 
 615 /**
 616  * spi_new_device - instantiate one new SPI device
 617  * @ctlr: Controller to which device is connected
 618  * @chip: Describes the SPI device
 619  * Context: can sleep
 620  *
 621  * On typical mainboards, this is purely internal; and it's not needed
 622  * after board init creates the hard-wired devices.  Some development
 623  * platforms may not be able to use spi_register_board_info though, and
 624  * this is exported so that for example a USB or parport based adapter
 625  * driver could add devices (which it would learn about out-of-band).
 626  *
 627  * Return: the new device, or NULL.
 628  */
 629 struct spi_device *spi_new_device(struct spi_controller *ctlr,
 630                                   struct spi_board_info *chip)
 631 {
 632         struct spi_device       *proxy;
 633         int                     status;
 634 
 635         /* NOTE:  caller did any chip->bus_num checks necessary.
 636          *
 637          * Also, unless we change the return value convention to use
 638          * error-or-pointer (not NULL-or-pointer), troubleshootability
 639          * suggests syslogged diagnostics are best here (ugh).
 640          */
 641 
 642         proxy = spi_alloc_device(ctlr);
 643         if (!proxy)
 644                 return NULL;
 645 
 646         WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
 647 
 648         proxy->chip_select = chip->chip_select;
 649         proxy->max_speed_hz = chip->max_speed_hz;
 650         proxy->mode = chip->mode;
 651         proxy->irq = chip->irq;
 652         strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
 653         proxy->dev.platform_data = (void *) chip->platform_data;
 654         proxy->controller_data = chip->controller_data;
 655         proxy->controller_state = NULL;
 656 
 657         if (chip->properties) {
 658                 status = device_add_properties(&proxy->dev, chip->properties);
 659                 if (status) {
 660                         dev_err(&ctlr->dev,
 661                                 "failed to add properties to '%s': %d\n",
 662                                 chip->modalias, status);
 663                         goto err_dev_put;
 664                 }
 665         }
 666 
 667         status = spi_add_device(proxy);
 668         if (status < 0)
 669                 goto err_remove_props;
 670 
 671         return proxy;
 672 
 673 err_remove_props:
 674         if (chip->properties)
 675                 device_remove_properties(&proxy->dev);
 676 err_dev_put:
 677         spi_dev_put(proxy);
 678         return NULL;
 679 }
 680 EXPORT_SYMBOL_GPL(spi_new_device);
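
/*
 * Illustrative sketch (editorial, hypothetical adapter driver): a bridge
 * that learns about a chip out-of-band can describe it with a
 * spi_board_info and hand it to spi_new_device(); note the NULL (not
 * ERR_PTR) return convention discussed above:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 2000000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_3,
 *	};
 *	struct spi_device *proxy = spi_new_device(ctlr, &chip);
 *
 *	if (!proxy)
 *		return -ENODEV;
 */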
 681 
 682 /**
 683  * spi_unregister_device - unregister a single SPI device
 684  * @spi: spi_device to unregister
 685  *
 686  * Start making the passed SPI device vanish. Normally this would be handled
 687  * by spi_unregister_controller().
 688  */
 689 void spi_unregister_device(struct spi_device *spi)
 690 {
 691         if (!spi)
 692                 return;
 693 
 694         if (spi->dev.of_node) {
 695                 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
 696                 of_node_put(spi->dev.of_node);
 697         }
 698         if (ACPI_COMPANION(&spi->dev))
 699                 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
 700         device_unregister(&spi->dev);
 701 }
 702 EXPORT_SYMBOL_GPL(spi_unregister_device);
 703 
 704 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
 705                                               struct spi_board_info *bi)
 706 {
 707         struct spi_device *dev;
 708 
 709         if (ctlr->bus_num != bi->bus_num)
 710                 return;
 711 
 712         dev = spi_new_device(ctlr, bi);
 713         if (!dev)
 714                 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
 715                         bi->modalias);
 716 }
 717 
 718 /**
 719  * spi_register_board_info - register SPI devices for a given board
 720  * @info: array of chip descriptors
 721  * @n: how many descriptors are provided
 722  * Context: can sleep
 723  *
 724  * Board-specific early init code calls this (probably during arch_initcall)
 725  * with segments of the SPI device table.  Any device nodes are created later,
 726  * after the relevant parent SPI controller (bus_num) is defined.  We keep
 727  * this table of devices forever, so that reloading a controller driver will
 728  * not make Linux forget about these hard-wired devices.
 729  *
 730  * Other code can also call this, e.g. a particular add-on board might provide
 731  * SPI devices through its expansion connector, so code initializing that board
 732  * would naturally declare its SPI devices.
 733  *
 734  * The board info passed can safely be __initdata ... but be careful of
 735  * any embedded pointers (platform_data, etc), they're copied as-is.
 736  * Device properties are deep-copied though.
 737  *
 738  * Return: zero on success, else a negative error code.
 739  */
 740 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
 741 {
 742         struct boardinfo *bi;
 743         int i;
 744 
 745         if (!n)
 746                 return 0;
 747 
 748         bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
 749         if (!bi)
 750                 return -ENOMEM;
 751 
 752         for (i = 0; i < n; i++, bi++, info++) {
 753                 struct spi_controller *ctlr;
 754 
 755                 memcpy(&bi->board_info, info, sizeof(*info));
 756                 if (info->properties) {
 757                         bi->board_info.properties =
 758                                         property_entries_dup(info->properties);
 759                         if (IS_ERR(bi->board_info.properties))
 760                                 return PTR_ERR(bi->board_info.properties);
 761                 }
 762 
 763                 mutex_lock(&board_lock);
 764                 list_add_tail(&bi->list, &board_list);
 765                 list_for_each_entry(ctlr, &spi_controller_list, list)
 766                         spi_match_controller_to_boardinfo(ctlr,
 767                                                           &bi->board_info);
 768                 mutex_unlock(&board_lock);
 769         }
 770 
 771         return 0;
 772 }
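
/*
 * Illustrative sketch (editorial, hypothetical board file): the table is
 * typically declared __initdata and registered from early board init:
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 10000000,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
 */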
 773 
 774 /*-------------------------------------------------------------------------*/
 775 
 776 static void spi_set_cs(struct spi_device *spi, bool enable)
 777 {
 778         if (spi->mode & SPI_CS_HIGH)
 779                 enable = !enable;
 780 
 781         if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
 782                 /*
 783                  * Honour the SPI_NO_CS flag and invert the enable line, as
 784                  * active low is default for SPI. Execution paths that handle
 785                  * polarity inversion in gpiolib (such as device tree) will
  786                  * enforce active high using SPI_CS_HIGH, resulting in a
 787                  * double inversion through the code above.
 788                  */
 789                 if (!(spi->mode & SPI_NO_CS)) {
 790                         if (spi->cs_gpiod)
 791                                 gpiod_set_value_cansleep(spi->cs_gpiod,
 792                                                          !enable);
 793                         else
 794                                 gpio_set_value_cansleep(spi->cs_gpio, !enable);
 795                 }
 796                 /* Some SPI masters need both GPIO CS & slave_select */
 797                 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
 798                     spi->controller->set_cs)
 799                         spi->controller->set_cs(spi, !enable);
 800         } else if (spi->controller->set_cs) {
 801                 spi->controller->set_cs(spi, !enable);
 802         }
 803 }
 804 
 805 #ifdef CONFIG_HAS_DMA
 806 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 807                 struct sg_table *sgt, void *buf, size_t len,
 808                 enum dma_data_direction dir)
 809 {
 810         const bool vmalloced_buf = is_vmalloc_addr(buf);
 811         unsigned int max_seg_size = dma_get_max_seg_size(dev);
 812 #ifdef CONFIG_HIGHMEM
 813         const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
 814                                 (unsigned long)buf < (PKMAP_BASE +
 815                                         (LAST_PKMAP * PAGE_SIZE)));
 816 #else
 817         const bool kmap_buf = false;
 818 #endif
 819         int desc_len;
 820         int sgs;
 821         struct page *vm_page;
 822         struct scatterlist *sg;
 823         void *sg_buf;
 824         size_t min;
 825         int i, ret;
 826 
 827         if (vmalloced_buf || kmap_buf) {
 828                 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
 829                 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
 830         } else if (virt_addr_valid(buf)) {
 831                 desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
 832                 sgs = DIV_ROUND_UP(len, desc_len);
 833         } else {
 834                 return -EINVAL;
 835         }
 836 
 837         ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
 838         if (ret != 0)
 839                 return ret;
 840 
 841         sg = &sgt->sgl[0];
 842         for (i = 0; i < sgs; i++) {
 843 
 844                 if (vmalloced_buf || kmap_buf) {
 845                         /*
 846                          * Next scatterlist entry size is the minimum between
 847                          * the desc_len and the remaining buffer length that
 848                          * fits in a page.
 849                          */
 850                         min = min_t(size_t, desc_len,
 851                                     min_t(size_t, len,
 852                                           PAGE_SIZE - offset_in_page(buf)));
 853                         if (vmalloced_buf)
 854                                 vm_page = vmalloc_to_page(buf);
 855                         else
 856                                 vm_page = kmap_to_page(buf);
 857                         if (!vm_page) {
 858                                 sg_free_table(sgt);
 859                                 return -ENOMEM;
 860                         }
 861                         sg_set_page(sg, vm_page,
 862                                     min, offset_in_page(buf));
 863                 } else {
 864                         min = min_t(size_t, len, desc_len);
 865                         sg_buf = buf;
 866                         sg_set_buf(sg, sg_buf, min);
 867                 }
 868 
 869                 buf += min;
 870                 len -= min;
 871                 sg = sg_next(sg);
 872         }
 873 
 874         ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
 875         if (!ret)
 876                 ret = -ENOMEM;
 877         if (ret < 0) {
 878                 sg_free_table(sgt);
 879                 return ret;
 880         }
 881 
 882         sgt->nents = ret;
 883 
 884         return 0;
 885 }
 886 
 887 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
 888                    struct sg_table *sgt, enum dma_data_direction dir)
 889 {
 890         if (sgt->orig_nents) {
 891                 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
 892                 sg_free_table(sgt);
 893         }
 894 }
 895 
 896 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 897 {
 898         struct device *tx_dev, *rx_dev;
 899         struct spi_transfer *xfer;
 900         int ret;
 901 
 902         if (!ctlr->can_dma)
 903                 return 0;
 904 
 905         if (ctlr->dma_tx)
 906                 tx_dev = ctlr->dma_tx->device->dev;
 907         else
 908                 tx_dev = ctlr->dev.parent;
 909 
 910         if (ctlr->dma_rx)
 911                 rx_dev = ctlr->dma_rx->device->dev;
 912         else
 913                 rx_dev = ctlr->dev.parent;
 914 
 915         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 916                 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 917                         continue;
 918 
 919                 if (xfer->tx_buf != NULL) {
 920                         ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
 921                                           (void *)xfer->tx_buf, xfer->len,
 922                                           DMA_TO_DEVICE);
 923                         if (ret != 0)
 924                                 return ret;
 925                 }
 926 
 927                 if (xfer->rx_buf != NULL) {
 928                         ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
 929                                           xfer->rx_buf, xfer->len,
 930                                           DMA_FROM_DEVICE);
 931                         if (ret != 0) {
 932                                 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
 933                                               DMA_TO_DEVICE);
 934                                 return ret;
 935                         }
 936                 }
 937         }
 938 
 939         ctlr->cur_msg_mapped = true;
 940 
 941         return 0;
 942 }
 943 
 944 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 945 {
 946         struct spi_transfer *xfer;
 947         struct device *tx_dev, *rx_dev;
 948 
 949         if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
 950                 return 0;
 951 
 952         if (ctlr->dma_tx)
 953                 tx_dev = ctlr->dma_tx->device->dev;
 954         else
 955                 tx_dev = ctlr->dev.parent;
 956 
 957         if (ctlr->dma_rx)
 958                 rx_dev = ctlr->dma_rx->device->dev;
 959         else
 960                 rx_dev = ctlr->dev.parent;
 961 
 962         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 963                 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 964                         continue;
 965 
 966                 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 967                 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 968         }
 969 
 970         return 0;
 971 }
 972 #else /* !CONFIG_HAS_DMA */
 973 static inline int __spi_map_msg(struct spi_controller *ctlr,
 974                                 struct spi_message *msg)
 975 {
 976         return 0;
 977 }
 978 
 979 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 980                                   struct spi_message *msg)
 981 {
 982         return 0;
 983 }
 984 #endif /* !CONFIG_HAS_DMA */
 985 
 986 static inline int spi_unmap_msg(struct spi_controller *ctlr,
 987                                 struct spi_message *msg)
 988 {
 989         struct spi_transfer *xfer;
 990 
 991         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 992                 /*
  993                  * Restore tx_buf or rx_buf to their original NULL value if they
  994                  * currently point at the controller's dummy buffers.
 995                  */
 996                 if (xfer->tx_buf == ctlr->dummy_tx)
 997                         xfer->tx_buf = NULL;
 998                 if (xfer->rx_buf == ctlr->dummy_rx)
 999                         xfer->rx_buf = NULL;
1000         }
1001 
1002         return __spi_unmap_msg(ctlr, msg);
1003 }
1004 
1005 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1006 {
1007         struct spi_transfer *xfer;
1008         void *tmp;
1009         unsigned int max_tx, max_rx;
1010 
1011         if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
1012                 max_tx = 0;
1013                 max_rx = 0;
1014 
1015                 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1016                         if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1017                             !xfer->tx_buf)
1018                                 max_tx = max(xfer->len, max_tx);
1019                         if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1020                             !xfer->rx_buf)
1021                                 max_rx = max(xfer->len, max_rx);
1022                 }
1023 
1024                 if (max_tx) {
1025                         tmp = krealloc(ctlr->dummy_tx, max_tx,
1026                                        GFP_KERNEL | GFP_DMA);
1027                         if (!tmp)
1028                                 return -ENOMEM;
1029                         ctlr->dummy_tx = tmp;
1030                         memset(tmp, 0, max_tx);
1031                 }
1032 
1033                 if (max_rx) {
1034                         tmp = krealloc(ctlr->dummy_rx, max_rx,
1035                                        GFP_KERNEL | GFP_DMA);
1036                         if (!tmp)
1037                                 return -ENOMEM;
1038                         ctlr->dummy_rx = tmp;
1039                 }
1040 
1041                 if (max_tx || max_rx) {
1042                         list_for_each_entry(xfer, &msg->transfers,
1043                                             transfer_list) {
1044                                 if (!xfer->len)
1045                                         continue;
1046                                 if (!xfer->tx_buf)
1047                                         xfer->tx_buf = ctlr->dummy_tx;
1048                                 if (!xfer->rx_buf)
1049                                         xfer->rx_buf = ctlr->dummy_rx;
1050                         }
1051                 }
1052         }
1053 
1054         return __spi_map_msg(ctlr, msg);
1055 }
1056 
1057 static int spi_transfer_wait(struct spi_controller *ctlr,
1058                              struct spi_message *msg,
1059                              struct spi_transfer *xfer)
1060 {
1061         struct spi_statistics *statm = &ctlr->statistics;
1062         struct spi_statistics *stats = &msg->spi->statistics;
1063         unsigned long long ms = 1;
1064 
1065         if (spi_controller_is_slave(ctlr)) {
1066                 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1067                         dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1068                         return -EINTR;
1069                 }
1070         } else {
1071                 ms = 8LL * 1000LL * xfer->len;
1072                 do_div(ms, xfer->speed_hz);
1073                 ms += ms + 200; /* some tolerance */
1074 
1075                 if (ms > UINT_MAX)
1076                         ms = UINT_MAX;
1077 
1078                 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1079                                                  msecs_to_jiffies(ms));
1080 
1081                 if (ms == 0) {
1082                         SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1083                         SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1084                         dev_err(&msg->spi->dev,
1085                                 "SPI transfer timed out\n");
1086                         return -ETIMEDOUT;
1087                 }
1088         }
1089 
1090         return 0;
1091 }
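
/*
 * Worked example (editorial) for the timeout heuristic above: a 4096-byte
 * transfer at 1 MHz gives ms = 8 * 1000 * 4096 / 1000000 = 32, and after
 * "ms += ms + 200" the hardware gets 32 + 32 + 200 = 264 ms to signal
 * completion before the transfer is reported as timed out.
 */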
1092 
1093 static void _spi_transfer_delay_ns(u32 ns)
1094 {
1095         if (!ns)
1096                 return;
1097         if (ns <= 1000) {
1098                 ndelay(ns);
1099         } else {
1100                 u32 us = DIV_ROUND_UP(ns, 1000);
1101 
1102                 if (us <= 10)
1103                         udelay(us);
1104                 else
1105                         usleep_range(us, us + DIV_ROUND_UP(us, 10));
1106         }
1107 }
1108 
1109 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1110                                           struct spi_transfer *xfer)
1111 {
1112         u32 delay = xfer->cs_change_delay;
1113         u32 unit = xfer->cs_change_delay_unit;
1114         u32 hz;
1115 
1116         /* return early on "fast" mode - for everything but USECS */
1117         if (!delay && unit != SPI_DELAY_UNIT_USECS)
1118                 return;
1119 
1120         switch (unit) {
1121         case SPI_DELAY_UNIT_USECS:
1122                 /* for compatibility use default of 10us */
1123                 if (!delay)
1124                         delay = 10000;
1125                 else
1126                         delay *= 1000;
1127                 break;
1128         case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
1129                 break;
1130         case SPI_DELAY_UNIT_SCK:
 1131                 /* if there is no effective speed known, then approximate
1132                  * by underestimating with half the requested hz
1133                  */
1134                 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1135                 delay *= DIV_ROUND_UP(1000000000, hz);
1136                 break;
1137         default:
1138                 dev_err_once(&msg->spi->dev,
1139                              "Use of unsupported delay unit %i, using default of 10us\n",
1140                              xfer->cs_change_delay_unit);
1141                 delay = 10000;
1142         }
1143         /* now sleep for the requested amount of time */
1144         _spi_transfer_delay_ns(delay);
1145 }
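
/*
 * Worked example (editorial) for the SPI_DELAY_UNIT_SCK case above: a
 * cs_change_delay of 4 clock cycles at an effective 1 MHz becomes
 * 4 * DIV_ROUND_UP(1000000000, 1000000) = 4000 ns; with no effective rate
 * known and speed_hz at 10 MHz, the half-rate underestimate of 5 MHz
 * yields 4 * 200 = 800 ns.
 */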
1146 
1147 /*
1148  * spi_transfer_one_message - Default implementation of transfer_one_message()
1149  *
1150  * This is a standard implementation of transfer_one_message() for
1151  * drivers which implement a transfer_one() operation.  It provides
1152  * standard handling of delays and chip select management.
1153  */
1154 static int spi_transfer_one_message(struct spi_controller *ctlr,
1155                                     struct spi_message *msg)
1156 {
1157         struct spi_transfer *xfer;
1158         bool keep_cs = false;
1159         int ret = 0;
1160         struct spi_statistics *statm = &ctlr->statistics;
1161         struct spi_statistics *stats = &msg->spi->statistics;
1162 
1163         spi_set_cs(msg->spi, true);
1164 
1165         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1166         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1167 
1168         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1169                 trace_spi_transfer_start(msg, xfer);
1170 
1171                 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1172                 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1173 
1174                 if (xfer->tx_buf || xfer->rx_buf) {
1175                         reinit_completion(&ctlr->xfer_completion);
1176 
1177                         ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1178                         if (ret < 0) {
1179                                 SPI_STATISTICS_INCREMENT_FIELD(statm,
1180                                                                errors);
1181                                 SPI_STATISTICS_INCREMENT_FIELD(stats,
1182                                                                errors);
1183                                 dev_err(&msg->spi->dev,
1184                                         "SPI transfer failed: %d\n", ret);
1185                                 goto out;
1186                         }
1187 
1188                         if (ret > 0) {
1189                                 ret = spi_transfer_wait(ctlr, msg, xfer);
1190                                 if (ret < 0)
1191                                         msg->status = ret;
1192                         }
1193                 } else {
1194                         if (xfer->len)
1195                                 dev_err(&msg->spi->dev,
1196                                         "Bufferless transfer has length %u\n",
1197                                         xfer->len);
1198                 }
1199 
1200                 trace_spi_transfer_stop(msg, xfer);
1201 
1202                 if (msg->status != -EINPROGRESS)
1203                         goto out;
1204 
1205                 if (xfer->delay_usecs)
1206                         _spi_transfer_delay_ns(xfer->delay_usecs * 1000);
1207 
1208                 if (xfer->cs_change) {
1209                         if (list_is_last(&xfer->transfer_list,
1210                                          &msg->transfers)) {
1211                                 keep_cs = true;
1212                         } else {
1213                                 spi_set_cs(msg->spi, false);
1214                                 _spi_transfer_cs_change_delay(msg, xfer);
1215                                 spi_set_cs(msg->spi, true);
1216                         }
1217                 }
1218 
1219                 msg->actual_length += xfer->len;
1220         }
1221 
1222 out:
1223         if (ret != 0 || !keep_cs)
1224                 spi_set_cs(msg->spi, false);
1225 
1226         if (msg->status == -EINPROGRESS)
1227                 msg->status = ret;
1228 
1229         if (msg->status && ctlr->handle_err)
1230                 ctlr->handle_err(ctlr, msg);
1231 
1232         spi_res_release(ctlr, msg);
1233 
1234         spi_finalize_current_message(ctlr);
1235 
1236         return ret;
1237 }
1238 
1239 /**
1240  * spi_finalize_current_transfer - report completion of a transfer
1241  * @ctlr: the controller reporting completion
1242  *
1243  * Called by SPI drivers using the core transfer_one_message()
1244  * implementation to notify it that the current interrupt driven
1245  * transfer has finished and the next one may be scheduled.
1246  */
1247 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1248 {
1249         complete(&ctlr->xfer_completion);
1250 }
1251 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
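
/*
 * Illustrative sketch (editorial, hypothetical "foo" controller driver):
 * a driver relying on the default transfer_one_message() returns a
 * positive value from transfer_one() when the transfer will complete
 * asynchronously, and later signals completion from its interrupt
 * handler (foo_start_hw() and foo_done() are made-up helpers):
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_hw(ctlr, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		if (foo_done(ctlr))
 *			spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */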
1252 
1253 /**
1254  * __spi_pump_messages - function which processes spi message queue
1255  * @ctlr: controller to process queue for
1256  * @in_kthread: true if we are in the context of the message pump thread
1257  *
1258  * This function checks if there is any spi message in the queue that
 1259  * needs processing and, if so, calls out to the driver to initialize hardware
1260  * and transfer each message.
1261  *
1262  * Note that it is called both from the kthread itself and also from
1263  * inside spi_sync(); the queue extraction handling at the top of the
1264  * function should deal with this safely.
1265  */
1266 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1267 {
1268         struct spi_message *msg;
1269         bool was_busy = false;
1270         unsigned long flags;
1271         int ret;
1272 
1273         /* Lock queue */
1274         spin_lock_irqsave(&ctlr->queue_lock, flags);
1275 
1276         /* Make sure we are not already running a message */
1277         if (ctlr->cur_msg) {
1278                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1279                 return;
1280         }
1281 
1282         /* If another context is idling the device then defer */
1283         if (ctlr->idling) {
1284                 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1285                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1286                 return;
1287         }
1288 
1289         /* Check if the queue is idle */
1290         if (list_empty(&ctlr->queue) || !ctlr->running) {
1291                 if (!ctlr->busy) {
1292                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1293                         return;
1294                 }
1295 
1296                 /* Only do teardown in the thread */
1297                 if (!in_kthread) {
1298                         kthread_queue_work(&ctlr->kworker,
1299                                            &ctlr->pump_messages);
1300                         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1301                         return;
1302                 }
1303 
1304                 ctlr->busy = false;
1305                 ctlr->idling = true;
1306                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1307 
1308                 kfree(ctlr->dummy_rx);
1309                 ctlr->dummy_rx = NULL;
1310                 kfree(ctlr->dummy_tx);
1311                 ctlr->dummy_tx = NULL;
1312                 if (ctlr->unprepare_transfer_hardware &&
1313                     ctlr->unprepare_transfer_hardware(ctlr))
1314                         dev_err(&ctlr->dev,
1315                                 "failed to unprepare transfer hardware\n");
1316                 if (ctlr->auto_runtime_pm) {
1317                         pm_runtime_mark_last_busy(ctlr->dev.parent);
1318                         pm_runtime_put_autosuspend(ctlr->dev.parent);
1319                 }
1320                 trace_spi_controller_idle(ctlr);
1321 
1322                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1323                 ctlr->idling = false;
1324                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1325                 return;
1326         }
1327 
1328         /* Extract head of queue */
1329         msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1330         ctlr->cur_msg = msg;
1331 
1332         list_del_init(&msg->queue);
1333         if (ctlr->busy)
1334                 was_busy = true;
1335         else
1336                 ctlr->busy = true;
1337         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1338 
1339         mutex_lock(&ctlr->io_mutex);
1340 
1341         if (!was_busy && ctlr->auto_runtime_pm) {
1342                 ret = pm_runtime_get_sync(ctlr->dev.parent);
1343                 if (ret < 0) {
1344                         pm_runtime_put_noidle(ctlr->dev.parent);
1345                         dev_err(&ctlr->dev, "Failed to power device: %d\n",
1346                                 ret);
1347                         mutex_unlock(&ctlr->io_mutex);
1348                         return;
1349                 }
1350         }
1351 
1352         if (!was_busy)
1353                 trace_spi_controller_busy(ctlr);
1354 
1355         if (!was_busy && ctlr->prepare_transfer_hardware) {
1356                 ret = ctlr->prepare_transfer_hardware(ctlr);
1357                 if (ret) {
1358                         dev_err(&ctlr->dev,
1359                                 "failed to prepare transfer hardware: %d\n",
1360                                 ret);
1361 
1362                         if (ctlr->auto_runtime_pm)
1363                                 pm_runtime_put(ctlr->dev.parent);
1364 
1365                         msg->status = ret;
1366                         spi_finalize_current_message(ctlr);
1367 
1368                         mutex_unlock(&ctlr->io_mutex);
1369                         return;
1370                 }
1371         }
1372 
1373         trace_spi_message_start(msg);
1374 
1375         if (ctlr->prepare_message) {
1376                 ret = ctlr->prepare_message(ctlr, msg);
1377                 if (ret) {
1378                         dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1379                                 ret);
1380                         msg->status = ret;
1381                         spi_finalize_current_message(ctlr);
1382                         goto out;
1383                 }
1384                 ctlr->cur_msg_prepared = true;
1385         }
1386 
1387         ret = spi_map_msg(ctlr, msg);
1388         if (ret) {
1389                 msg->status = ret;
1390                 spi_finalize_current_message(ctlr);
1391                 goto out;
1392         }
1393 
1394         ret = ctlr->transfer_one_message(ctlr, msg);
1395         if (ret) {
1396                 dev_err(&ctlr->dev,
1397                         "failed to transfer one message from queue\n");
1398                 goto out;
1399         }
1400 
1401 out:
1402         mutex_unlock(&ctlr->io_mutex);
1403 
1404         /* Prod the scheduler in case transfer_one() was busy waiting */
1405         if (!ret)
1406                 cond_resched();
1407 }
1408 
1409 /**
1410  * spi_pump_messages - kthread work function which processes spi message queue
1411  * @work: pointer to kthread work struct contained in the controller struct
1412  */
1413 static void spi_pump_messages(struct kthread_work *work)
1414 {
1415         struct spi_controller *ctlr =
1416                 container_of(work, struct spi_controller, pump_messages);
1417 
1418         __spi_pump_messages(ctlr, true);
1419 }
1420 
1421 /**
1422  * spi_set_thread_rt - set the controller to pump at realtime priority
1423  * @ctlr: controller to boost priority of
1424  *
1425  * This can be called because the controller requested realtime priority
1426  * (by setting the ->rt value before calling spi_register_controller()) or
1427  * because a device on the bus said that its transfers needed realtime
1428  * priority.
1429  *
1430  * NOTE: at the moment if any device on a bus says it needs realtime then
1431  * the thread will be at realtime priority for all transfers on that
1432  * controller.  If this eventually becomes a problem we may see if we can
1433  * find a way to boost the priority only temporarily during relevant
1434  * transfers.
1435  */
1436 static void spi_set_thread_rt(struct spi_controller *ctlr)
1437 {
1438         struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
1439 
1440         dev_info(&ctlr->dev,
1441                 "will run message pump with realtime priority\n");
1442         sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
1443 }
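
/*
 * Example (illustrative sketch): a controller driver opts into the realtime
 * message pump simply by setting ->rt before registration, e.g. in probe():
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	ctlr->rt = true;
 *	ret = devm_spi_register_controller(&pdev->dev, ctlr);
 *
 * (struct foo_spi is a hypothetical driver-private structure.)
 */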
1444 
1445 static int spi_init_queue(struct spi_controller *ctlr)
1446 {
1447         ctlr->running = false;
1448         ctlr->busy = false;
1449 
1450         kthread_init_worker(&ctlr->kworker);
1451         ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1452                                          "%s", dev_name(&ctlr->dev));
1453         if (IS_ERR(ctlr->kworker_task)) {
1454                 dev_err(&ctlr->dev, "failed to create message pump task\n");
1455                 return PTR_ERR(ctlr->kworker_task);
1456         }
1457         kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1458 
1459         /*
1460          * Controller config will indicate if this controller should run the
1461          * message pump with high (realtime) priority to reduce the transfer
1462          * latency on the bus by minimising the delay between a transfer
1463          * request and the scheduling of the message pump thread. Without this
1464          * setting the message pump thread will remain at default priority.
1465          */
1466         if (ctlr->rt)
1467                 spi_set_thread_rt(ctlr);
1468 
1469         return 0;
1470 }
1471 
1472 /**
1473  * spi_get_next_queued_message() - called by driver to check for queued
1474  * messages
1475  * @ctlr: the controller to check for queued messages
1476  *
1477  * If there are more messages in the queue, the next message is returned from
1478  * this call.
1479  *
1480  * Return: the next message in the queue, else NULL if the queue is empty.
1481  */
1482 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1483 {
1484         struct spi_message *next;
1485         unsigned long flags;
1486 
1487         /* get a pointer to the next message, if any */
1488         spin_lock_irqsave(&ctlr->queue_lock, flags);
1489         next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1490                                         queue);
1491         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1492 
1493         return next;
1494 }
1495 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
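
/*
 * Example (illustrative sketch): a driver that manages its own message
 * handling might peek at the queue to decide whether to keep the hardware
 * prepared between messages; foo_power_down() is a hypothetical helper.
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_power_down(spi_controller_get_devdata(ctlr));
 */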
1496 
1497 /**
1498  * spi_finalize_current_message() - the current message is complete
1499  * @ctlr: the controller to return the message to
1500  *
1501  * Called by the driver to notify the core that the message at the front of the
1502  * queue is complete and can be removed from the queue.
1503  */
1504 void spi_finalize_current_message(struct spi_controller *ctlr)
1505 {
1506         struct spi_message *mesg;
1507         unsigned long flags;
1508         int ret;
1509 
1510         spin_lock_irqsave(&ctlr->queue_lock, flags);
1511         mesg = ctlr->cur_msg;
1512         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1513 
1514         spi_unmap_msg(ctlr, mesg);
1515 
1516         if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1517                 ret = ctlr->unprepare_message(ctlr, mesg);
1518                 if (ret) {
1519                         dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1520                                 ret);
1521                 }
1522         }
1523 
1524         spin_lock_irqsave(&ctlr->queue_lock, flags);
1525         ctlr->cur_msg = NULL;
1526         ctlr->cur_msg_prepared = false;
1527         kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1528         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1529 
1530         trace_spi_message_done(mesg);
1531 
1532         mesg->state = NULL;
1533         if (mesg->complete)
1534                 mesg->complete(mesg->context);
1535 }
1536 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
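
/*
 * Example (illustrative sketch): a driver that provides its own
 * ->transfer_one_message() must call spi_finalize_current_message() itself
 * once the whole message has been handled; foo_do_xfer() is hypothetical.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *		int ret = 0;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			ret = foo_do_xfer(ctlr, msg->spi, xfer);
 *			if (ret)
 *				break;
 *			msg->actual_length += xfer->len;
 *		}
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */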
1537 
1538 static int spi_start_queue(struct spi_controller *ctlr)
1539 {
1540         unsigned long flags;
1541 
1542         spin_lock_irqsave(&ctlr->queue_lock, flags);
1543 
1544         if (ctlr->running || ctlr->busy) {
1545                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1546                 return -EBUSY;
1547         }
1548 
1549         ctlr->running = true;
1550         ctlr->cur_msg = NULL;
1551         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1552 
1553         kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1554 
1555         return 0;
1556 }
1557 
1558 static int spi_stop_queue(struct spi_controller *ctlr)
1559 {
1560         unsigned long flags;
1561         unsigned limit = 500;
1562         int ret = 0;
1563 
1564         spin_lock_irqsave(&ctlr->queue_lock, flags);
1565 
1566         /*
1567          * This is a bit lame, but is optimized for the common execution path.
1568          * A wait_queue on the ctlr->busy could be used, but then the common
1569          * execution path (pump_messages) would be required to call wake_up or
1570          * friends on every SPI message. Do this instead.
1571          */
1572         while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1573                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1574                 usleep_range(10000, 11000);
1575                 spin_lock_irqsave(&ctlr->queue_lock, flags);
1576         }
1577 
1578         if (!list_empty(&ctlr->queue) || ctlr->busy)
1579                 ret = -EBUSY;
1580         else
1581                 ctlr->running = false;
1582 
1583         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1584 
1585         if (ret) {
1586                 dev_warn(&ctlr->dev, "could not stop message queue\n");
1587                 return ret;
1588         }
1589         return ret;
1590 }
1591 
1592 static int spi_destroy_queue(struct spi_controller *ctlr)
1593 {
1594         int ret;
1595 
1596         ret = spi_stop_queue(ctlr);
1597 
1598         /*
1599          * kthread_flush_worker will block until all work is done.
1600          * If the reason that stop_queue timed out is that the work will never
1601          * finish, then it does no good to call flush/stop thread, so
1602          * return anyway.
1603          */
1604         if (ret) {
1605                 dev_err(&ctlr->dev, "problem destroying queue\n");
1606                 return ret;
1607         }
1608 
1609         kthread_flush_worker(&ctlr->kworker);
1610         kthread_stop(ctlr->kworker_task);
1611 
1612         return 0;
1613 }
1614 
1615 static int __spi_queued_transfer(struct spi_device *spi,
1616                                  struct spi_message *msg,
1617                                  bool need_pump)
1618 {
1619         struct spi_controller *ctlr = spi->controller;
1620         unsigned long flags;
1621 
1622         spin_lock_irqsave(&ctlr->queue_lock, flags);
1623 
1624         if (!ctlr->running) {
1625                 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1626                 return -ESHUTDOWN;
1627         }
1628         msg->actual_length = 0;
1629         msg->status = -EINPROGRESS;
1630 
1631         list_add_tail(&msg->queue, &ctlr->queue);
1632         if (!ctlr->busy && need_pump)
1633                 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1634 
1635         spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1636         return 0;
1637 }
1638 
1639 /**
1640  * spi_queued_transfer - transfer function for queued transfers
1641  * @spi: spi device which is requesting transfer
1642  * @msg: spi message to be queued to the driver's queue
1643  *
1644  * Return: zero on success, else a negative error code.
1645  */
1646 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1647 {
1648         return __spi_queued_transfer(spi, msg, true);
1649 }
1650 
1651 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1652 {
1653         int ret;
1654 
1655         ctlr->transfer = spi_queued_transfer;
1656         if (!ctlr->transfer_one_message)
1657                 ctlr->transfer_one_message = spi_transfer_one_message;
1658 
1659         /* Initialize and start queue */
1660         ret = spi_init_queue(ctlr);
1661         if (ret) {
1662                 dev_err(&ctlr->dev, "problem initializing queue\n");
1663                 goto err_init_queue;
1664         }
1665         ctlr->queued = true;
1666         ret = spi_start_queue(ctlr);
1667         if (ret) {
1668                 dev_err(&ctlr->dev, "problem starting queue\n");
1669                 goto err_start_queue;
1670         }
1671 
1672         return 0;
1673 
1674 err_start_queue:
1675         spi_destroy_queue(ctlr);
1676 err_init_queue:
1677         return ret;
1678 }
1679 
1680 /**
1681  * spi_flush_queue - Send all pending messages in the queue from the caller's
1682  *                   context
1683  * @ctlr: controller to process queue for
1684  *
1685  * This should be used when one wants to ensure all pending messages have been
1686  * sent before doing something. It is used by the spi-mem code to make sure SPI
1687  * memory operations do not preempt regular SPI transfers that have been queued
1688  * before the spi-mem operation.
1689  */
1690 void spi_flush_queue(struct spi_controller *ctlr)
1691 {
1692         if (ctlr->transfer == spi_queued_transfer)
1693                 __spi_pump_messages(ctlr, false);
1694 }
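
/*
 * Example: spi-mem calls this before executing a memory operation, roughly:
 *
 *	spi_flush_queue(ctlr);
 *	...
 *	ret = ctlr->mem_ops->exec_op(mem, op);
 *
 * (Condensed sketch of the spi_mem_exec_op() flow, not a literal quote.)
 */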
1695 
1696 /*-------------------------------------------------------------------------*/
1697 
1698 #if defined(CONFIG_OF)
1699 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1700                            struct device_node *nc)
1701 {
1702         u32 value;
1703         int rc;
1704 
1705         /* Mode (clock phase/polarity/etc.) */
1706         if (of_property_read_bool(nc, "spi-cpha"))
1707                 spi->mode |= SPI_CPHA;
1708         if (of_property_read_bool(nc, "spi-cpol"))
1709                 spi->mode |= SPI_CPOL;
1710         if (of_property_read_bool(nc, "spi-3wire"))
1711                 spi->mode |= SPI_3WIRE;
1712         if (of_property_read_bool(nc, "spi-lsb-first"))
1713                 spi->mode |= SPI_LSB_FIRST;
1714         if (of_property_read_bool(nc, "spi-cs-high"))
1715                 spi->mode |= SPI_CS_HIGH;
1716 
1717         /* Device DUAL/QUAD mode */
1718         if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1719                 switch (value) {
1720                 case 1:
1721                         break;
1722                 case 2:
1723                         spi->mode |= SPI_TX_DUAL;
1724                         break;
1725                 case 4:
1726                         spi->mode |= SPI_TX_QUAD;
1727                         break;
1728                 case 8:
1729                         spi->mode |= SPI_TX_OCTAL;
1730                         break;
1731                 default:
1732                         dev_warn(&ctlr->dev,
1733                                 "spi-tx-bus-width %d not supported\n",
1734                                 value);
1735                         break;
1736                 }
1737         }
1738 
1739         if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1740                 switch (value) {
1741                 case 1:
1742                         break;
1743                 case 2:
1744                         spi->mode |= SPI_RX_DUAL;
1745                         break;
1746                 case 4:
1747                         spi->mode |= SPI_RX_QUAD;
1748                         break;
1749                 case 8:
1750                         spi->mode |= SPI_RX_OCTAL;
1751                         break;
1752                 default:
1753                         dev_warn(&ctlr->dev,
1754                                 "spi-rx-bus-width %d not supported\n",
1755                                 value);
1756                         break;
1757                 }
1758         }
1759 
1760         if (spi_controller_is_slave(ctlr)) {
1761                 if (!of_node_name_eq(nc, "slave")) {
1762                         dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1763                                 nc);
1764                         return -EINVAL;
1765                 }
1766                 return 0;
1767         }
1768 
1769         /* Device address */
1770         rc = of_property_read_u32(nc, "reg", &value);
1771         if (rc) {
1772                 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1773                         nc, rc);
1774                 return rc;
1775         }
1776         spi->chip_select = value;
1777 
1778         /*
1779          * For descriptors associated with the device, polarity inversion is
1780          * handled in the gpiolib, so all gpio chip selects are "active high"
1781          * in the logical sense; the gpiolib will invert the line if need be.
1782          */
1783         if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
1784             ctlr->cs_gpiods[spi->chip_select])
1785                 spi->mode |= SPI_CS_HIGH;
1786 
1787         /* Device speed */
1788         rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1789         if (rc) {
1790                 dev_err(&ctlr->dev,
1791                         "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
1792                 return rc;
1793         }
1794         spi->max_speed_hz = value;
1795 
1796         return 0;
1797 }
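
/*
 * Example (illustrative device tree fragment): a node exercising the
 * properties parsed above.  The node name and compatible are made up; the
 * property names are the binding names handled by of_spi_parse_dt().
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;			(chip select 0)
 *		spi-max-frequency = <50000000>;
 *		spi-cpol;
 *		spi-cpha;
 *		spi-tx-bus-width = <4>;		(SPI_TX_QUAD)
 *		spi-rx-bus-width = <4>;		(SPI_RX_QUAD)
 *	};
 */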
1798 
1799 static struct spi_device *
1800 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
1801 {
1802         struct spi_device *spi;
1803         int rc;
1804 
1805         /* Alloc an spi_device */
1806         spi = spi_alloc_device(ctlr);
1807         if (!spi) {
1808                 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
1809                 rc = -ENOMEM;
1810                 goto err_out;
1811         }
1812 
1813         /* Select device driver */
1814         rc = of_modalias_node(nc, spi->modalias,
1815                                 sizeof(spi->modalias));
1816         if (rc < 0) {
1817                 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
1818                 goto err_out;
1819         }
1820 
1821         rc = of_spi_parse_dt(ctlr, spi, nc);
1822         if (rc)
1823                 goto err_out;
1824 
1825         /* Store a pointer to the node in the device structure */
1826         of_node_get(nc);
1827         spi->dev.of_node = nc;
1828 
1829         /* Register the new device */
1830         rc = spi_add_device(spi);
1831         if (rc) {
1832                 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
1833                 goto err_of_node_put;
1834         }
1835 
1836         return spi;
1837 
1838 err_of_node_put:
1839         of_node_put(nc);
1840 err_out:
1841         spi_dev_put(spi);
1842         return ERR_PTR(rc);
1843 }
1844 
1845 /**
1846  * of_register_spi_devices() - Register child devices onto the SPI bus
1847  * @ctlr:       Pointer to spi_controller device
1848  *
1849  * Registers an spi_device for each child node of the controller node which
1850  * represents a valid SPI slave.
1851  */
1852 static void of_register_spi_devices(struct spi_controller *ctlr)
1853 {
1854         struct spi_device *spi;
1855         struct device_node *nc;
1856 
1857         if (!ctlr->dev.of_node)
1858                 return;
1859 
1860         for_each_available_child_of_node(ctlr->dev.of_node, nc) {
1861                 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1862                         continue;
1863                 spi = of_register_spi_device(ctlr, nc);
1864                 if (IS_ERR(spi)) {
1865                         dev_warn(&ctlr->dev,
1866                                  "Failed to create SPI device for %pOF\n", nc);
1867                         of_node_clear_flag(nc, OF_POPULATED);
1868                 }
1869         }
1870 }
1871 #else
1872 static void of_register_spi_devices(struct spi_controller *ctlr) { }
1873 #endif
1874 
1875 #ifdef CONFIG_ACPI
1876 struct acpi_spi_lookup {
1877         struct spi_controller   *ctlr;
1878         u32                     max_speed_hz;
1879         u32                     mode;
1880         int                     irq;
1881         u8                      bits_per_word;
1882         u8                      chip_select;
1883 };
1884 
1885 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
1886                                             struct acpi_spi_lookup *lookup)
1887 {
1888         const union acpi_object *obj;
1889 
1890         if (!x86_apple_machine)
1891                 return;
1892 
1893         if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
1894             && obj->buffer.length >= 4)
1895                 lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
1896 
1897         if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
1898             && obj->buffer.length == 8)
1899                 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
1900 
1901         if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
1902             && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
1903                 lookup->mode |= SPI_LSB_FIRST;
1904 
1905         if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
1906             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1907                 lookup->mode |= SPI_CPOL;
1908 
1909         if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
1910             && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
1911                 lookup->mode |= SPI_CPHA;
1912 }
1913 
1914 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1915 {
1916         struct acpi_spi_lookup *lookup = data;
1917         struct spi_controller *ctlr = lookup->ctlr;
1918 
1919         if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1920                 struct acpi_resource_spi_serialbus *sb;
1921                 acpi_handle parent_handle;
1922                 acpi_status status;
1923 
1924                 sb = &ares->data.spi_serial_bus;
1925                 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1926 
1927                         status = acpi_get_handle(NULL,
1928                                                  sb->resource_source.string_ptr,
1929                                                  &parent_handle);
1930 
1931                         if (ACPI_FAILURE(status) ||
1932                             ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
1933                                 return -ENODEV;
1934 
1935                         /*
1936                          * ACPI DeviceSelection numbering is handled by the
1937                          * host controller driver in Windows and can vary
1938                          * from driver to driver. In Linux we always expect
1939                          * 0 .. max - 1 so we need to ask the driver to
1940                          * translate between the two schemes.
1941                          */
1942                         if (ctlr->fw_translate_cs) {
1943                                 int cs = ctlr->fw_translate_cs(ctlr,
1944                                                 sb->device_selection);
1945                                 if (cs < 0)
1946                                         return cs;
1947                                 lookup->chip_select = cs;
1948                         } else {
1949                                 lookup->chip_select = sb->device_selection;
1950                         }
1951 
1952                         lookup->max_speed_hz = sb->connection_speed;
1953 
1954                         if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1955                                 lookup->mode |= SPI_CPHA;
1956                         if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1957                                 lookup->mode |= SPI_CPOL;
1958                         if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1959                                 lookup->mode |= SPI_CS_HIGH;
1960                 }
1961         } else if (lookup->irq < 0) {
1962                 struct resource r;
1963 
1964                 if (acpi_dev_resource_interrupt(ares, 0, &r))
1965                         lookup->irq = r.start;
1966         }
1967 
1968         /* Always tell the ACPI core to skip this resource */
1969         return 1;
1970 }
1971 
1972 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1973                                             struct acpi_device *adev)
1974 {
1975         acpi_handle parent_handle = NULL;
1976         struct list_head resource_list;
1977         struct acpi_spi_lookup lookup = {};
1978         struct spi_device *spi;
1979         int ret;
1980 
1981         if (acpi_bus_get_status(adev) || !adev->status.present ||
1982             acpi_device_enumerated(adev))
1983                 return AE_OK;
1984 
1985         lookup.ctlr             = ctlr;
1986         lookup.irq              = -1;
1987 
1988         INIT_LIST_HEAD(&resource_list);
1989         ret = acpi_dev_get_resources(adev, &resource_list,
1990                                      acpi_spi_add_resource, &lookup);
1991         acpi_dev_free_resource_list(&resource_list);
1992 
1993         if (ret < 0)
1994                 /* found SPI in _CRS but it points to another controller */
1995                 return AE_OK;
1996 
1997         if (!lookup.max_speed_hz &&
1998             !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
1999             ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
2000                 /* Apple does not use _CRS but nested devices for SPI slaves */
2001                 acpi_spi_parse_apple_properties(adev, &lookup);
2002         }
2003 
2004         if (!lookup.max_speed_hz)
2005                 return AE_OK;
2006 
2007         spi = spi_alloc_device(ctlr);
2008         if (!spi) {
2009                 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2010                         dev_name(&adev->dev));
2011                 return AE_NO_MEMORY;
2012         }
2013 
2014         ACPI_COMPANION_SET(&spi->dev, adev);
2015         spi->max_speed_hz       = lookup.max_speed_hz;
2016         spi->mode               = lookup.mode;
2017         spi->irq                = lookup.irq;
2018         spi->bits_per_word      = lookup.bits_per_word;
2019         spi->chip_select        = lookup.chip_select;
2020 
2021         acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2022                           sizeof(spi->modalias));
2023 
2024         if (spi->irq < 0)
2025                 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2026 
2027         acpi_device_set_enumerated(adev);
2028 
2029         adev->power.flags.ignore_parent = true;
2030         if (spi_add_device(spi)) {
2031                 adev->power.flags.ignore_parent = false;
2032                 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2033                         dev_name(&adev->dev));
2034                 spi_dev_put(spi);
2035         }
2036 
2037         return AE_OK;
2038 }
2039 
2040 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2041                                        void *data, void **return_value)
2042 {
2043         struct spi_controller *ctlr = data;
2044         struct acpi_device *adev;
2045 
2046         if (acpi_bus_get_device(handle, &adev))
2047                 return AE_OK;
2048 
2049         return acpi_register_spi_device(ctlr, adev);
2050 }
2051 
2052 #define SPI_ACPI_ENUMERATE_MAX_DEPTH            32
2053 
2054 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2055 {
2056         acpi_status status;
2057         acpi_handle handle;
2058 
2059         handle = ACPI_HANDLE(ctlr->dev.parent);
2060         if (!handle)
2061                 return;
2062 
2063         status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2064                                      SPI_ACPI_ENUMERATE_MAX_DEPTH,
2065                                      acpi_spi_add_device, NULL, ctlr, NULL);
2066         if (ACPI_FAILURE(status))
2067                 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2068 }
2069 #else
2070 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2071 #endif /* CONFIG_ACPI */
2072 
2073 static void spi_controller_release(struct device *dev)
2074 {
2075         struct spi_controller *ctlr;
2076 
2077         ctlr = container_of(dev, struct spi_controller, dev);
2078         kfree(ctlr);
2079 }
2080 
2081 static struct class spi_master_class = {
2082         .name           = "spi_master",
2083         .owner          = THIS_MODULE,
2084         .dev_release    = spi_controller_release,
2085         .dev_groups     = spi_master_groups,
2086 };
2087 
2088 #ifdef CONFIG_SPI_SLAVE
2089 /**
2090  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2091  *                   controller
2092  * @spi: device used for the current transfer
2093  */
2094 int spi_slave_abort(struct spi_device *spi)
2095 {
2096         struct spi_controller *ctlr = spi->controller;
2097 
2098         if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2099                 return ctlr->slave_abort(ctlr);
2100 
2101         return -ENOTSUPP;
2102 }
2103 EXPORT_SYMBOL_GPL(spi_slave_abort);
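
/*
 * Example (illustrative sketch): a slave protocol driver typically aborts a
 * submitted-but-never-completed request on teardown or on an interrupted
 * wait, along these lines (priv is a hypothetical driver structure):
 *
 *	ret = wait_for_completion_interruptible(&priv->finished);
 *	if (ret) {
 *		spi_slave_abort(priv->spi);
 *		return ret;
 *	}
 */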
2104 
2105 static int match_true(struct device *dev, void *data)
2106 {
2107         return 1;
2108 }
2109 
2110 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2111                           char *buf)
2112 {
2113         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2114                                                    dev);
2115         struct device *child;
2116 
2117         child = device_find_child(&ctlr->dev, NULL, match_true);
2118         return sprintf(buf, "%s\n",
2119                        child ? to_spi_device(child)->modalias : NULL);
2120 }
2121 
2122 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2123                            const char *buf, size_t count)
2124 {
2125         struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2126                                                    dev);
2127         struct spi_device *spi;
2128         struct device *child;
2129         char name[32];
2130         int rc;
2131 
2132         rc = sscanf(buf, "%31s", name);
2133         if (rc != 1 || !name[0])
2134                 return -EINVAL;
2135 
2136         child = device_find_child(&ctlr->dev, NULL, match_true);
2137         if (child) {
2138                 /* Remove registered slave */
2139                 device_unregister(child);
2140                 put_device(child);
2141         }
2142 
2143         if (strcmp(name, "(null)")) {
2144                 /* Register new slave */
2145                 spi = spi_alloc_device(ctlr);
2146                 if (!spi)
2147                         return -ENOMEM;
2148 
2149                 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2150 
2151                 rc = spi_add_device(spi);
2152                 if (rc) {
2153                         spi_dev_put(spi);
2154                         return rc;
2155                 }
2156         }
2157 
2158         return count;
2159 }
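
/*
 * Note: this attribute is the user-space hook for binding a protocol driver
 * to a slave controller, e.g. writing "spi-slave-time" to the controller's
 * "slave" attribute registers a device with that modalias, and writing
 * "(null)" removes it again.
 */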
2160 
2161 static DEVICE_ATTR_RW(slave);
2162 
2163 static struct attribute *spi_slave_attrs[] = {
2164         &dev_attr_slave.attr,
2165         NULL,
2166 };
2167 
2168 static const struct attribute_group spi_slave_group = {
2169         .attrs = spi_slave_attrs,
2170 };
2171 
2172 static const struct attribute_group *spi_slave_groups[] = {
2173         &spi_controller_statistics_group,
2174         &spi_slave_group,
2175         NULL,
2176 };
2177 
2178 static struct class spi_slave_class = {
2179         .name           = "spi_slave",
2180         .owner          = THIS_MODULE,
2181         .dev_release    = spi_controller_release,
2182         .dev_groups     = spi_slave_groups,
2183 };
2184 #else
2185 extern struct class spi_slave_class;    /* dummy */
2186 #endif
2187 
2188 /**
2189  * __spi_alloc_controller - allocate an SPI master or slave controller
2190  * @dev: the parent device of the controller, possibly on the platform_bus
2191  * @size: how much zeroed driver-private data to allocate; the pointer to this
2192  *      memory is in the driver_data field of the returned device, accessible
2193  *      with spi_controller_get_devdata(); the memory is cacheline aligned;
2194  *      drivers granting DMA access to portions of their private data need to
2195  *      round up @size using ALIGN(size, dma_get_cache_alignment()).
2196  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2197  *      slave (true) controller
2198  * Context: can sleep
2199  *
2200  * This call is used only by SPI controller drivers, which are the
2201  * only ones directly touching chip registers.  It's how they allocate
2202  * an spi_controller structure, prior to calling spi_register_controller().
2203  *
2204  * This must be called from context that can sleep.
2205  *
2206  * The caller is responsible for assigning the bus number and initializing the
2207  * controller's methods before calling spi_register_controller(); and (after
2208  * errors adding the device) calling spi_controller_put() to prevent a memory
2209  * leak.
2210  *
2211  * Return: the SPI controller structure on success, else NULL.
2212  */
2213 struct spi_controller *__spi_alloc_controller(struct device *dev,
2214                                               unsigned int size, bool slave)
2215 {
2216         struct spi_controller   *ctlr;
2217         size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2218 
2219         if (!dev)
2220                 return NULL;
2221 
2222         ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2223         if (!ctlr)
2224                 return NULL;
2225 
2226         device_initialize(&ctlr->dev);
2227         ctlr->bus_num = -1;
2228         ctlr->num_chipselect = 1;
2229         ctlr->slave = slave;
2230         if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2231                 ctlr->dev.class = &spi_slave_class;
2232         else
2233                 ctlr->dev.class = &spi_master_class;
2234         ctlr->dev.parent = dev;
2235         pm_suspend_ignore_children(&ctlr->dev, true);
2236         spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2237 
2238         return ctlr;
2239 }
2240 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
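
/*
 * Example (illustrative sketch): the usual allocation pattern in a
 * controller driver's probe(), via the spi_alloc_master() wrapper around
 * this function.  struct foo_spi is a hypothetical private structure.
 *
 *	struct spi_controller *ctlr;
 *	struct foo_spi *fs;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*fs));
 *	if (!ctlr)
 *		return -ENOMEM;
 *
 *	fs = spi_controller_get_devdata(ctlr);
 *	fs->ctlr = ctlr;
 *
 * Any error path taken before registration succeeds should call
 * spi_controller_put(ctlr) to free the allocation.
 */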
2241 
2242 #ifdef CONFIG_OF
2243 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2244 {
2245         int nb, i, *cs;
2246         struct device_node *np = ctlr->dev.of_node;
2247 
2248         if (!np)
2249                 return 0;
2250 
2251         nb = of_gpio_named_count(np, "cs-gpios");
2252         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2253 
2254         /* Return error only for an incorrectly formed cs-gpios property */
2255         if (nb == 0 || nb == -ENOENT)
2256                 return 0;
2257         else if (nb < 0)
2258                 return nb;
2259 
2260         cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2261                           GFP_KERNEL);
2262         ctlr->cs_gpios = cs;
2263 
2264         if (!ctlr->cs_gpios)
2265                 return -ENOMEM;
2266 
2267         for (i = 0; i < ctlr->num_chipselect; i++)
2268                 cs[i] = -ENOENT;
2269 
2270         for (i = 0; i < nb; i++)
2271                 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2272 
2273         return 0;
2274 }
2275 #else
2276 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2277 {
2278         return 0;
2279 }
2280 #endif
2281 
2282 /**
2283  * spi_get_gpio_descs() - grab chip select GPIOs for the master
2284  * @ctlr: The SPI master to grab GPIO descriptors for
2285  */
2286 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2287 {
2288         int nb, i;
2289         struct gpio_desc **cs;
2290         struct device *dev = &ctlr->dev;
2291 
2292         nb = gpiod_count(dev, "cs");
2293         ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2294 
2295         /* No GPIOs at all is fine, else return the error */
2296         if (nb == 0 || nb == -ENOENT)
2297                 return 0;
2298         else if (nb < 0)
2299                 return nb;
2300 
2301         cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2302                           GFP_KERNEL);
2303         if (!cs)
2304                 return -ENOMEM;
2305         ctlr->cs_gpiods = cs;
2306 
2307         for (i = 0; i < nb; i++) {
2308                 /*
2309                  * Most chipselects are active low; the inverted
2310                  * semantics are handled by special quirks in gpiolib,
2311                  * so initializing them as GPIOD_OUT_LOW here means
2312                  * "unasserted", and in most cases this will drive the
2313                  * physical line high.
2314                  */
2315                 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2316                                                       GPIOD_OUT_LOW);
2317                 if (IS_ERR(cs[i]))
2318                         return PTR_ERR(cs[i]);
2319 
2320                 if (cs[i]) {
2321                         /*
2322                          * If we find a CS GPIO, name it after the device and
2323                          * chip select line.
2324                          */
2325                         char *gpioname;
2326 
2327                         gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2328                                                   dev_name(dev), i);
2329                         if (!gpioname)
2330                                 return -ENOMEM;
2331                         gpiod_set_consumer_name(cs[i], gpioname);
2332                 }
2333         }
2334 
2335         return 0;
2336 }
2337 
2338 static int spi_controller_check_ops(struct spi_controller *ctlr)
2339 {
2340         /*
2341          * The controller may implement only the high-level SPI-memory-like
2342          * operations if it does not support regular SPI transfers, and this is
2343          * a valid use case.
2344          * If ->mem_ops is NULL, we request that at least one of the
2345          * ->transfer_xxx() methods be implemented.
2346          */
2347         if (ctlr->mem_ops) {
2348                 if (!ctlr->mem_ops->exec_op)
2349                         return -EINVAL;
2350         } else if (!ctlr->transfer && !ctlr->transfer_one &&
2351                    !ctlr->transfer_one_message) {
2352                 return -EINVAL;
2353         }
2354 
2355         return 0;
2356 }
2357 
2358 /**
2359  * spi_register_controller - register SPI master or slave controller
2360  * @ctlr: initialized master, originally from spi_alloc_master() or
2361  *      spi_alloc_slave()
2362  * Context: can sleep
2363  *
2364  * SPI controllers connect to their drivers using some non-SPI bus,
2365  * such as the platform bus.  The final stage of probe() in that code
2366  * includes calling spi_register_controller() to hook up to this SPI bus glue.
2367  *
2368  * SPI controllers use board specific (often SOC specific) bus numbers,
2369  * and board-specific addressing for SPI devices combines those numbers
2370  * with chip select numbers.  Since SPI does not directly support dynamic
2371  * device identification, boards need configuration tables telling which
2372  * chip is at which address.
2373  *
2374  * This must be called from context that can sleep.  It returns zero on
2375  * success, else a negative error code (dropping the controller's refcount).
2376  * After a successful return, the caller is responsible for calling
2377  * spi_unregister_controller().
2378  *
2379  * Return: zero on success, else a negative error code.
2380  */
2381 int spi_register_controller(struct spi_controller *ctlr)
2382 {
2383         struct device           *dev = ctlr->dev.parent;
2384         struct boardinfo        *bi;
2385         int                     status;
2386         int                     id, first_dynamic;
2387 
2388         if (!dev)
2389                 return -ENODEV;
2390 
2391         /*
2392          * Make sure all necessary hooks are implemented before registering
2393          * the SPI controller.
2394          */
2395         status = spi_controller_check_ops(ctlr);
2396         if (status)
2397                 return status;
2398 
2399         if (ctlr->bus_num >= 0) {
2400                 /* devices with a fixed bus num must check-in with the num */
2401                 mutex_lock(&board_lock);
2402                 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2403                         ctlr->bus_num + 1, GFP_KERNEL);
2404                 mutex_unlock(&board_lock);
2405                 if (WARN(id < 0, "couldn't get idr"))
2406                         return id == -ENOSPC ? -EBUSY : id;
2407                 ctlr->bus_num = id;
2408         } else if (ctlr->dev.of_node) {
2409                 /* allocate dynamic bus number using Linux idr */
2410                 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2411                 if (id >= 0) {
2412                         ctlr->bus_num = id;
2413                         mutex_lock(&board_lock);
2414                         id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2415                                        ctlr->bus_num + 1, GFP_KERNEL);
2416                         mutex_unlock(&board_lock);
2417                         if (WARN(id < 0, "couldn't get idr"))
2418                                 return id == -ENOSPC ? -EBUSY : id;
2419                 }
2420         }
2421         if (ctlr->bus_num < 0) {
2422                 first_dynamic = of_alias_get_highest_id("spi");
2423                 if (first_dynamic < 0)
2424                         first_dynamic = 0;
2425                 else
2426                         first_dynamic++;
2427 
2428                 mutex_lock(&board_lock);
2429                 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2430                                0, GFP_KERNEL);
2431                 mutex_unlock(&board_lock);
2432                 if (WARN(id < 0, "couldn't get idr"))
2433                         return id;
2434                 ctlr->bus_num = id;
2435         }
2436         INIT_LIST_HEAD(&ctlr->queue);
2437         spin_lock_init(&ctlr->queue_lock);
2438         spin_lock_init(&ctlr->bus_lock_spinlock);
2439         mutex_init(&ctlr->bus_lock_mutex);
2440         mutex_init(&ctlr->io_mutex);
2441         ctlr->bus_lock_flag = 0;
2442         init_completion(&ctlr->xfer_completion);
2443         if (!ctlr->max_dma_len)
2444                 ctlr->max_dma_len = INT_MAX;
2445 
2446         /* register the device, then userspace will see it.
2447          * registration fails if the bus ID is in use.
2448          */
2449         dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2450 
2451         if (!spi_controller_is_slave(ctlr)) {
2452                 if (ctlr->use_gpio_descriptors) {
2453                         status = spi_get_gpio_descs(ctlr);
2454                         if (status)
2455                                 goto free_bus_id;
2456                         /*
2457                          * A controller using GPIO descriptors always
2458                          * supports SPI_CS_HIGH if need be.
2459                          */
2460                         ctlr->mode_bits |= SPI_CS_HIGH;
2461                 } else {
2462                         /* Legacy code path for GPIOs from DT */
2463                         status = of_spi_get_gpio_numbers(ctlr);
2464                         if (status)
2465                                 goto free_bus_id;
2466                 }
2467         }
2468 
2469         /*
2470          * Even if it's just one always-selected device, there must
2471          * be at least one chipselect.
2472          */
2473         if (!ctlr->num_chipselect) {
2474                 status = -EINVAL;
2475                 goto free_bus_id;
2476         }
2477 
2478         status = device_add(&ctlr->dev);
2479         if (status < 0)
2480                 goto free_bus_id;
2481         dev_dbg(dev, "registered %s %s\n",
2482                         spi_controller_is_slave(ctlr) ? "slave" : "master",
2483                         dev_name(&ctlr->dev));
2484 
2485         /*
2486          * If we're using a queued driver, start the queue. Note that we don't
2487          * need the queueing logic if the driver is only supporting high-level
2488          * memory operations.
2489          */
2490         if (ctlr->transfer) {
2491                 dev_info(dev, "controller is unqueued, this is deprecated\n");
2492         } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2493                 status = spi_controller_initialize_queue(ctlr);
2494                 if (status) {
2495                         device_del(&ctlr->dev);
2496                         goto free_bus_id;
2497                 }
2498         }
2499         /* add statistics */
2500         spin_lock_init(&ctlr->statistics.lock);
2501 
2502         mutex_lock(&board_lock);
2503         list_add_tail(&ctlr->list, &spi_controller_list);
2504         list_for_each_entry(bi, &board_list, list)
2505                 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2506         mutex_unlock(&board_lock);
2507 
2508         /* Register devices from the device tree and ACPI */
2509         of_register_spi_devices(ctlr);
2510         acpi_register_spi_devices(ctlr);
2511         return status;
2512 
2513 free_bus_id:
2514         mutex_lock(&board_lock);
2515         idr_remove(&spi_master_idr, ctlr->bus_num);
2516         mutex_unlock(&board_lock);
2517         return status;
2518 }
2519 EXPORT_SYMBOL_GPL(spi_register_controller);
2520 
2521 static void devm_spi_unregister(struct device *dev, void *res)
2522 {
2523         spi_unregister_controller(*(struct spi_controller **)res);
2524 }
2525 
2526 /**
2527  * devm_spi_register_controller - register managed SPI master or slave
2528  *      controller
2529  * @dev:    device managing SPI controller
2530  * @ctlr: initialized controller, originally from spi_alloc_master() or
2531  *      spi_alloc_slave()
2532  * Context: can sleep
2533  *
2534  * Register an SPI controller as with spi_register_controller(); the
2535  * controller is automatically unregistered and freed when @dev is unbound.
2536  *
2537  * Return: zero on success, else a negative error code.
2538  */
2539 int devm_spi_register_controller(struct device *dev,
2540                                  struct spi_controller *ctlr)
2541 {
2542         struct spi_controller **ptr;
2543         int ret;
2544 
2545         ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2546         if (!ptr)
2547                 return -ENOMEM;
2548 
2549         ret = spi_register_controller(ctlr);
2550         if (!ret) {
2551                 *ptr = ctlr;
2552                 devres_add(dev, ptr);
2553         } else {
2554                 devres_free(ptr);
2555         }
2556 
2557         return ret;
2558 }
2559 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
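
/*
 * Example (illustrative sketch): a probe() that satisfies
 * spi_controller_check_ops() by supplying ->transfer_one() and registers
 * with the managed variant so teardown happens automatically on unbind.
 * The foo_* callbacks are hypothetical.
 *
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->use_gpio_descriptors = true;
 *	ctlr->set_cs = foo_set_cs;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *
 *	ret = devm_spi_register_controller(&pdev->dev, ctlr);
 *	if (ret)
 *		return ret;
 */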
2560 
2561 static int __unregister(struct device *dev, void *null)
2562 {
2563         spi_unregister_device(to_spi_device(dev));
2564         return 0;
2565 }
2566 
2567 /**
2568  * spi_unregister_controller - unregister SPI master or slave controller
2569  * @ctlr: the controller being unregistered
2570  * Context: can sleep
2571  *
2572  * This call is used only by SPI controller drivers, which are the
2573  * only ones directly touching chip registers.
2574  *
2575  * This must be called from context that can sleep.
2576  *
2577  * Note that this function also drops a reference to the controller.
2578  */
2579 void spi_unregister_controller(struct spi_controller *ctlr)
2580 {
2581         struct spi_controller *found;
2582         int id = ctlr->bus_num;
2583 
2584         device_for_each_child(&ctlr->dev, NULL, __unregister);
2585 
2586         /* First make sure that this controller was ever added */
2587         mutex_lock(&board_lock);
2588         found = idr_find(&spi_master_idr, id);
2589         mutex_unlock(&board_lock);
2590         if (ctlr->queued) {
2591                 if (spi_destroy_queue(ctlr))
2592                         dev_err(&ctlr->dev, "queue remove failed\n");
2593         }
2594         mutex_lock(&board_lock);
2595         list_del(&ctlr->list);
2596         mutex_unlock(&board_lock);
2597 
2598         device_unregister(&ctlr->dev);
2599         /* free bus id */
2600         mutex_lock(&board_lock);
2601         if (found == ctlr)
2602                 idr_remove(&spi_master_idr, id);
2603         mutex_unlock(&board_lock);
2604 }
2605 EXPORT_SYMBOL_GPL(spi_unregister_controller);
2606 
2607 int spi_controller_suspend(struct spi_controller *ctlr)
2608 {
2609         int ret;
2610 
2611         /* Basically no-ops for non-queued controllers */
2612         if (!ctlr->queued)
2613                 return 0;
2614 
2615         ret = spi_stop_queue(ctlr);
2616         if (ret)
2617                 dev_err(&ctlr->dev, "queue stop failed\n");
2618 
2619         return ret;
2620 }
2621 EXPORT_SYMBOL_GPL(spi_controller_suspend);
2622 
2623 int spi_controller_resume(struct spi_controller *ctlr)
2624 {
2625         int ret;
2626 
2627         if (!ctlr->queued)
2628                 return 0;
2629 
2630         ret = spi_start_queue(ctlr);
2631         if (ret)
2632                 dev_err(&ctlr->dev, "queue restart failed\n");
2633 
2634         return ret;
2635 }
2636 EXPORT_SYMBOL_GPL(spi_controller_resume);
2637 
2638 static int __spi_controller_match(struct device *dev, const void *data)
2639 {
2640         struct spi_controller *ctlr;
2641         const u16 *bus_num = data;
2642 
2643         ctlr = container_of(dev, struct spi_controller, dev);
2644         return ctlr->bus_num == *bus_num;
2645 }
2646 
2647 /**
2648  * spi_busnum_to_master - look up master associated with bus_num
2649  * @bus_num: the master's bus number
2650  * Context: can sleep
2651  *
2652  * This call may be used with devices that are registered after
2653  * arch init time.  It returns a refcounted pointer to the relevant
2654  * spi_controller (which the caller must release), or NULL if there is
2655  * no such master registered.
2656  *
2657  * Return: the SPI master structure on success, else NULL.
2658  */
2659 struct spi_controller *spi_busnum_to_master(u16 bus_num)
2660 {
2661         struct device           *dev;
2662         struct spi_controller   *ctlr = NULL;
2663 
2664         dev = class_find_device(&spi_master_class, NULL, &bus_num,
2665                                 __spi_controller_match);
2666         if (dev)
2667                 ctlr = container_of(dev, struct spi_controller, dev);
2668         /* reference got in class_find_device */
2669         return ctlr;
2670 }
2671 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
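
/*
 * Example (illustrative sketch): the reference obtained here must be dropped
 * by the caller once it is done with the controller.
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(0);
 *
 *	if (ctlr) {
 *		...
 *		spi_controller_put(ctlr);
 *	}
 */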
2672 
2673 /*-------------------------------------------------------------------------*/
2674 
2675 /* Core methods for SPI resource management */
2676 
2677 /**
2678  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2679  *                 during the processing of a spi_message while using
2680  *                 spi_transfer_one
2681  * @spi:     the spi device for which we allocate memory
2682  * @release: the release code to execute for this resource
2683  * @size:    size to alloc and return
2684  * @gfp:     GFP allocation flags
2685  *
2686  * Return: the pointer to the allocated data
2687  *
2688  * This may get enhanced in the future to allocate from a memory pool
2689  * of the @spi_device or @spi_controller to avoid repeated allocations.
2690  */
2691 void *spi_res_alloc(struct spi_device *spi,
2692                     spi_res_release_t release,
2693                     size_t size, gfp_t gfp)
2694 {
2695         struct spi_res *sres;
2696 
2697         sres = kzalloc(sizeof(*sres) + size, gfp);
2698         if (!sres)
2699                 return NULL;
2700 
2701         INIT_LIST_HEAD(&sres->entry);
2702         sres->release = release;
2703 
2704         return sres->data;
2705 }
2706 EXPORT_SYMBOL_GPL(spi_res_alloc);
2707 
2708 /**
2709  * spi_res_free - free an spi resource
2710  * @res: pointer to the custom data of a resource
2711  *
2712  */
2713 void spi_res_free(void *res)
2714 {
2715         struct spi_res *sres = container_of(res, struct spi_res, data);
2716 
2717         if (!res)
2718                 return;
2719 
2720         WARN_ON(!list_empty(&sres->entry));
2721         kfree(sres);
2722 }
2723 EXPORT_SYMBOL_GPL(spi_res_free);
2724 
2725 /**
2726  * spi_res_add - add a spi_res to the spi_message
2727  * @message: the spi message
2728  * @res:     the spi_resource
2729  */
2730 void spi_res_add(struct spi_message *message, void *res)
2731 {
2732         struct spi_res *sres = container_of(res, struct spi_res, data);
2733 
2734         WARN_ON(!list_empty(&sres->entry));
2735         list_add_tail(&sres->entry, &message->resources);
2736 }
2737 EXPORT_SYMBOL_GPL(spi_res_add);
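
/*
 * Example (illustrative sketch): the usual allocate/add pattern.  The
 * release callback runs from spi_res_release() once the message completes;
 * foo_res and foo_undo() are hypothetical.
 *
 *	static void foo_res_release(struct spi_controller *ctlr,
 *				    struct spi_message *msg, void *res)
 *	{
 *		foo_undo((struct foo_res *)res);
 *	}
 *
 *	...
 *	struct foo_res *r;
 *
 *	r = spi_res_alloc(msg->spi, foo_res_release, sizeof(*r), GFP_KERNEL);
 *	if (!r)
 *		return -ENOMEM;
 *	spi_res_add(msg, r);
 */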
2738 
2739 /**
2740  * spi_res_release - release all spi resources for this message
2741  * @ctlr:  the @spi_controller
2742  * @message: the @spi_message
2743  */
2744 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2745 {
2746         struct spi_res *res, *tmp;
2747 
2748         list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
2749                 if (res->release)
2750                         res->release(ctlr, message, res->data);
2751 
2752                 list_del(&res->entry);
2753 
2754                 kfree(res);
2755         }
2756 }
2757 EXPORT_SYMBOL_GPL(spi_res_release);
2758 
2759 /*-------------------------------------------------------------------------*/
2760 
2761 /* Core methods for spi_message alterations */
2762 
2763 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2764                                             struct spi_message *msg,
2765                                             void *res)
2766 {
2767         struct spi_replaced_transfers *rxfer = res;
2768         size_t i;
2769 
2770         /* call extra callback if requested */
2771         if (rxfer->release)
2772                 rxfer->release(ctlr, msg, res);
2773 
2774         /* insert replaced transfers back into the message */
2775         list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2776 
2777         /* remove the formerly inserted entries */
2778         for (i = 0; i < rxfer->inserted; i++)
2779                 list_del(&rxfer->inserted_transfers[i].transfer_list);
2780 }
2781 
2782 /**
2783  * spi_replace_transfers - replace transfers with several transfers
2784  *                         and register change with spi_message.resources
2785  * @msg:           the spi_message we work upon
2786  * @xfer_first:    the first spi_transfer we want to replace
2787  * @remove:        number of transfers to remove
2788  * @insert:        the number of transfers we want to insert instead
2789  * @release:       extra release code necessary in some circumstances
2790  * @extradatasize: extra data to allocate (with alignment guarantees
2791  *                 of struct @spi_transfer)
2792  * @gfp:           gfp flags
2793  *
2794  * Returns: pointer to @spi_replaced_transfers,
2795  *          PTR_ERR(...) in case of errors.
2796  */
2797 struct spi_replaced_transfers *spi_replace_transfers(
2798         struct spi_message *msg,
2799         struct spi_transfer *xfer_first,
2800         size_t remove,
2801         size_t insert,
2802         spi_replaced_release_t release,
2803         size_t extradatasize,
2804         gfp_t gfp)
2805 {
2806         struct spi_replaced_transfers *rxfer;
2807         struct spi_transfer *xfer;
2808         size_t i;
2809 
2810         /* allocate the structure using spi_res */
2811         rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2812                               struct_size(rxfer, inserted_transfers, insert)
2813                               + extradatasize,
2814                               gfp);
2815         if (!rxfer)
2816                 return ERR_PTR(-ENOMEM);
2817 
2818         /* the release code to invoke before running the generic release */
2819         rxfer->release = release;
2820 
2821         /* assign extradata */
2822         if (extradatasize)
2823                 rxfer->extradata =
2824                         &rxfer->inserted_transfers[insert];
2825 
2826         /* init the replaced_transfers list */
2827         INIT_LIST_HEAD(&rxfer->replaced_transfers);
2828 
2829         /* assign the list_entry after which we should reinsert
2830          * the @replaced_transfers - it may be &msg->transfers itself!
2831          */
2832         rxfer->replaced_after = xfer_first->transfer_list.prev;
2833 
2834         /* remove the requested number of transfers */
2835         for (i = 0; i < remove; i++) {
2836                 /* if the entry after replaced_after is msg->transfers
2837                  * then we have been requested to remove more transfers
2838                  * than are in the list
2839                  */
2840                 if (rxfer->replaced_after->next == &msg->transfers) {
2841                         dev_err(&msg->spi->dev,
2842                                 "requested to remove more spi_transfers than are available\n");
2843                         /* insert replaced transfers back into the message */
2844                         list_splice(&rxfer->replaced_transfers,
2845                                     rxfer->replaced_after);
2846 
2847                         /* free the spi_replace_transfer structure */
2848                         spi_res_free(rxfer);
2849 
2850                         /* and return with an error */
2851                         return ERR_PTR(-EINVAL);
2852                 }
2853 
2854                 /* remove the entry after replaced_after from list of
2855                  * transfers and add it to list of replaced_transfers
2856                  */
2857                 list_move_tail(rxfer->replaced_after->next,
2858                                &rxfer->replaced_transfers);
2859         }
2860 
2861         /* create copies of the given xfer with identical settings,
2862          * based on the first transfer that is being removed
2863          */
2864         for (i = 0; i < insert; i++) {
2865                 /* we need to run in reverse order */
2866                 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2867 
2868                 /* copy all spi_transfer data */
2869                 memcpy(xfer, xfer_first, sizeof(*xfer));
2870 
2871                 /* add to list */
2872                 list_add(&xfer->transfer_list, rxfer->replaced_after);
2873 
2874                 /* clear cs_change and delay_usecs for all but the last */
2875                 if (i) {
2876                         xfer->cs_change = false;
2877                         xfer->delay_usecs = 0;
2878                 }
2879         }
2880 
2881         /* set up inserted */
2882         rxfer->inserted = insert;
2883 
2884         /* and register it with spi_res/spi_message */
2885         spi_res_add(msg, rxfer);
2886 
2887         return rxfer;
2888 }
2889 EXPORT_SYMBOL_GPL(spi_replace_transfers);
2890 
2891 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2892                                         struct spi_message *msg,
2893                                         struct spi_transfer **xferp,
2894                                         size_t maxsize,
2895                                         gfp_t gfp)
2896 {
2897         struct spi_transfer *xfer = *xferp, *xfers;
2898         struct spi_replaced_transfers *srt;
2899         size_t offset;
2900         size_t count, i;
2901 
2902         /* calculate how many we have to replace */
2903         count = DIV_ROUND_UP(xfer->len, maxsize);
2904 
2905         /* create replacement */
2906         srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2907         if (IS_ERR(srt))
2908                 return PTR_ERR(srt);
2909         xfers = srt->inserted_transfers;
2910 
2911         /* now handle each of those newly inserted spi_transfers.
2912          * note that the replacement spi_transfers are all preset
2913          * to the same values as *xferp, so tx_buf, rx_buf and len
2914          * are all identical (as are most other fields), so we just
2915          * have to fix up len and the pointers.
2916          *
2917          * this also includes support for the deprecated
2918          * spi_message.is_dma_mapped interface
2919          */
2920 
2921         /* the first transfer just needs the length modified, so we
2922          * run it outside the loop
2923          */
2924         xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2925 
2926         /* all the others need rx_buf/tx_buf also set */
2927         for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2928                 /* update rx_buf, tx_buf and dma */
2929                 if (xfers[i].rx_buf)
2930                         xfers[i].rx_buf += offset;
2931                 if (xfers[i].rx_dma)
2932                         xfers[i].rx_dma += offset;
2933                 if (xfers[i].tx_buf)
2934                         xfers[i].tx_buf += offset;
2935                 if (xfers[i].tx_dma)
2936                         xfers[i].tx_dma += offset;
2937 
2938                 /* update length */
2939                 xfers[i].len = min(maxsize, xfers[i].len - offset);
2940         }
2941 
2942         /* we set up xferp to the last entry we have inserted,
2943          * so that we skip those already split transfers
2944          */
2945         *xferp = &xfers[count - 1];
2946 
2947         /* increment statistics counters */
2948         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2949                                        transfers_split_maxsize);
2950         SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2951                                        transfers_split_maxsize);
2952 
2953         return 0;
2954 }
2955 
2956 /**
2957  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2958  *                               when an individual transfer exceeds a
2959  *                               certain size
2960  * @ctlr:    the @spi_controller for this transfer
2961  * @msg:     the @spi_message to transform
2962  * @maxsize: the maximum length a single transfer may have before it is split
2963  * @gfp:     GFP allocation flags
2964  *
2965  * Return: status of transformation
2966  */
2967 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2968                                 struct spi_message *msg,
2969                                 size_t maxsize,
2970                                 gfp_t gfp)
2971 {
2972         struct spi_transfer *xfer;
2973         int ret;
2974 
2975         /* iterate over the transfer_list,
2976          * but note that xfer is advanced to the last transfer inserted
2977          * to avoid checking sizes again unnecessarily (also, xfer may
2978          * belong to a different list by the time the replacement
2979          * has happened)
2980          */
2981         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2982                 if (xfer->len > maxsize) {
2983                         ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2984                                                            maxsize, gfp);
2985                         if (ret)
2986                                 return ret;
2987                 }
2988         }
2989 
2990         return 0;
2991 }
2992 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
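
/*
 * Illustrative sketch, not used anywhere in this file: a controller driver
 * whose hardware cannot handle arbitrarily long transfers might call
 * spi_split_transfers_maxsize() from its ->prepare_message() hook.  The
 * "foo" prefix and the FOO_MAX_XFER_LEN limit below are hypothetical.
 */
#define FOO_MAX_XFER_LEN        65535

static int foo_prepare_message(struct spi_controller *ctlr,
                               struct spi_message *msg)
{
        /* split any transfer longer than the hardware can do in one go */
        return spi_split_transfers_maxsize(ctlr, msg, FOO_MAX_XFER_LEN,
                                           GFP_KERNEL);
}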
2993 
2994 /*-------------------------------------------------------------------------*/
2995 
2996 /* Core methods for SPI controller protocol drivers.  Some of the
2997  * other core methods are currently defined as inline functions.
2998  */
2999 
3000 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3001                                         u8 bits_per_word)
3002 {
3003         if (ctlr->bits_per_word_mask) {
3004                 /* Only 32 bits fit in the mask */
3005                 if (bits_per_word > 32)
3006                         return -EINVAL;
3007                 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3008                         return -EINVAL;
3009         }
3010 
3011         return 0;
3012 }
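
/*
 * Illustrative sketch, assuming a hypothetical controller driver: the mask
 * checked above is advertised by the controller at registration time, one
 * bit per supported word size.
 */
static void garply_init_caps(struct spi_controller *ctlr)
{
        /* accept any word size from 8 to 16 bits inclusive */
        ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
}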
3013 
3014 /**
3015  * spi_setup - setup SPI mode and clock rate
3016  * @spi: the device whose settings are being modified
3017  * Context: can sleep, and no requests are queued to the device
3018  *
3019  * SPI protocol drivers may need to update the transfer mode if the
3020  * device doesn't work with its default.  They may likewise need
3021  * to update clock rates or word sizes from initial values.  This function
3022  * changes those settings, and must be called from a context that can sleep.
3023  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3024  * effect the next time the device is selected and data is transferred to
3025  * or from it.  When this function returns, the spi device is deselected.
3026  *
3027  * Note that this call will fail if the protocol driver specifies an option
3028  * that the underlying controller or its driver does not support.  For
3029  * example, not all hardware supports wire transfers using nine bit words,
3030  * LSB-first wire encoding, or active-high chipselects.
3031  *
3032  * Return: zero on success, else a negative error code.
3033  */
3034 int spi_setup(struct spi_device *spi)
3035 {
3036         unsigned        bad_bits, ugly_bits;
3037         int             status;
3038 
3039         /* check the mode to prevent DUAL and QUAD being set at the same time
3040          */
3041         if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
3042                 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
3043                 dev_err(&spi->dev,
3044                 "setup: can not select dual and quad at the same time\n");
3045                 return -EINVAL;
3046         }
3047         /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
3048          */
3049         if ((spi->mode & SPI_3WIRE) && (spi->mode &
3050                 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3051                  SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3052                 return -EINVAL;
3053         /* help drivers fail *cleanly* when they need options
3054          * that aren't supported with their current controller.
3055          * SPI_CS_WORD has a fallback software implementation,
3056          * so it is ignored here.
3057          */
3058         bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
3059         /* nothing prevents us from working with an active-high CS if it
3060          * is driven by a GPIO.
3061          */
3062         if (gpio_is_valid(spi->cs_gpio))
3063                 bad_bits &= ~SPI_CS_HIGH;
3064         ugly_bits = bad_bits &
3065                     (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3066                      SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3067         if (ugly_bits) {
3068                 dev_warn(&spi->dev,
3069                          "setup: ignoring unsupported mode bits %x\n",
3070                          ugly_bits);
3071                 spi->mode &= ~ugly_bits;
3072                 bad_bits &= ~ugly_bits;
3073         }
3074         if (bad_bits) {
3075                 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3076                         bad_bits);
3077                 return -EINVAL;
3078         }
3079 
3080         if (!spi->bits_per_word)
3081                 spi->bits_per_word = 8;
3082 
3083         status = __spi_validate_bits_per_word(spi->controller,
3084                                               spi->bits_per_word);
3085         if (status)
3086                 return status;
3087 
3088         if (!spi->max_speed_hz)
3089                 spi->max_speed_hz = spi->controller->max_speed_hz;
3090 
3091         if (spi->controller->setup)
3092                 status = spi->controller->setup(spi);
3093 
3094         spi_set_cs(spi, false);
3095 
3096         if (spi->rt && !spi->controller->rt) {
3097                 spi->controller->rt = true;
3098                 spi_set_thread_rt(spi->controller);
3099         }
3100 
3101         dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3102                         (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
3103                         (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3104                         (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3105                         (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3106                         (spi->mode & SPI_LOOP) ? "loopback, " : "",
3107                         spi->bits_per_word, spi->max_speed_hz,
3108                         status);
3109 
3110         return status;
3111 }
3112 EXPORT_SYMBOL_GPL(spi_setup);
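
/*
 * Illustrative sketch of a hypothetical protocol driver's probe(): override
 * the defaults the device needs, then let spi_setup() validate and apply
 * them.  The mode, word size and clock limit below are assumptions.
 */
static int bar_probe(struct spi_device *spi)
{
        spi->mode |= SPI_MODE_3;        /* assumed device clock mode */
        spi->bits_per_word = 8;
        spi->max_speed_hz = 1000000;    /* assumed 1 MHz device limit */

        /* fails cleanly if the controller cannot provide these settings */
        return spi_setup(spi);
}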
3113 
3114 /**
3115  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3116  * @spi: the device that requires specific CS timing configuration
3117  * @setup: CS setup time in terms of clock count
3118  * @hold: CS hold time in terms of clock count
3119  * @inactive_dly: CS inactive delay between transfers in terms of clock count
3120  */
3121 void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold,
3122                        u8 inactive_dly)
3123 {
3124         if (spi->controller->set_cs_timing)
3125                 spi->controller->set_cs_timing(spi, setup, hold, inactive_dly);
3126 }
3127 EXPORT_SYMBOL_GPL(spi_set_cs_timing);
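
/*
 * Illustrative sketch: a peripheral driver needing extra chip-select margin
 * could ask for it after spi_setup().  The counts are hypothetical and only
 * take effect if the controller implements ->set_cs_timing().
 */
static void baz_tune_cs(struct spi_device *spi)
{
        /* 4 clocks of CS setup, 2 of hold, 8 of inactive delay */
        spi_set_cs_timing(spi, 4, 2, 8);
}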
3128 
3129 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3130 {
3131         struct spi_controller *ctlr = spi->controller;
3132         struct spi_transfer *xfer;
3133         int w_size;
3134 
3135         if (list_empty(&message->transfers))
3136                 return -EINVAL;
3137 
3138         /* If an SPI controller does not support toggling the CS line on each
3139          * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3140          * for the CS line, we can emulate the CS-per-word hardware function by
3141          * splitting transfers into one-word transfers and ensuring that
3142          * cs_change is set for each transfer.
3143          */
3144         if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3145                                           spi->cs_gpiod ||
3146                                           gpio_is_valid(spi->cs_gpio))) {
3147                 size_t maxsize;
3148                 int ret;
3149 
3150                 maxsize = (spi->bits_per_word + 7) / 8;
3151 
3152                 /* spi_split_transfers_maxsize() requires message->spi */
3153                 message->spi = spi;
3154 
3155                 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3156                                                   GFP_KERNEL);
3157                 if (ret)
3158                         return ret;
3159 
3160                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3161                         /* don't change cs_change on the last entry in the list */
3162                         if (list_is_last(&xfer->transfer_list, &message->transfers))
3163                                 break;
3164                         xfer->cs_change = 1;
3165                 }
3166         }
3167 
3168         /* Half-duplex links include original MicroWire, and ones with
3169          * only one data pin like SPI_3WIRE (switches direction) or where
3170          * either MOSI or MISO is missing.  They can also be caused by
3171          * software limitations.
3172          */
3173         if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3174             (spi->mode & SPI_3WIRE)) {
3175                 unsigned flags = ctlr->flags;
3176 
3177                 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3178                         if (xfer->rx_buf && xfer->tx_buf)
3179                                 return -EINVAL;
3180                         if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3181                                 return -EINVAL;
3182                         if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3183                                 return -EINVAL;
3184                 }
3185         }
3186 
3187         /*
3188          * Set transfer bits_per_word and max speed to the spi device
3189          * defaults if they are not set for this transfer.
3190          * Set transfer tx_nbits and rx_nbits to the single-transfer default
3191          * (SPI_NBITS_SINGLE) if they are not set for this transfer.
3192          * Ensure transfer word_delay is at least as long as that required
3193          * by the device itself.
3194          */
3195         message->frame_length = 0;
3196         list_for_each_entry(xfer, &message->transfers, transfer_list) {
3197                 xfer->effective_speed_hz = 0;
3198                 message->frame_length += xfer->len;
3199                 if (!xfer->bits_per_word)
3200                         xfer->bits_per_word = spi->bits_per_word;
3201 
3202                 if (!xfer->speed_hz)
3203                         xfer->speed_hz = spi->max_speed_hz;
3204 
3205                 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3206                         xfer->speed_hz = ctlr->max_speed_hz;
3207 
3208                 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3209                         return -EINVAL;
3210 
3211                 /*
3212                  * SPI transfer length should be a multiple of the SPI word size,
3213                  * where the word size is rounded up to a power-of-two number of bytes
3214                  */
3215                 if (xfer->bits_per_word <= 8)
3216                         w_size = 1;
3217                 else if (xfer->bits_per_word <= 16)
3218                         w_size = 2;
3219                 else
3220                         w_size = 4;
3221 
3222                 /* No partial transfers accepted */
3223                 if (xfer->len % w_size)
3224                         return -EINVAL;
3225 
3226                 if (xfer->speed_hz && ctlr->min_speed_hz &&
3227                     xfer->speed_hz < ctlr->min_speed_hz)
3228                         return -EINVAL;
3229 
3230                 if (xfer->tx_buf && !xfer->tx_nbits)
3231                         xfer->tx_nbits = SPI_NBITS_SINGLE;
3232                 if (xfer->rx_buf && !xfer->rx_nbits)
3233                         xfer->rx_nbits = SPI_NBITS_SINGLE;
3234                 /* check transfer tx/rx_nbits:
3235                  * 1. check the value matches one of single, dual and quad
3236                  * 2. check tx/rx_nbits match the mode in spi_device
3237                  */
3238                 if (xfer->tx_buf) {
3239                         if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3240                                 xfer->tx_nbits != SPI_NBITS_DUAL &&
3241                                 xfer->tx_nbits != SPI_NBITS_QUAD)
3242                                 return -EINVAL;
3243                         if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3244                                 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3245                                 return -EINVAL;
3246                         if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3247                                 !(spi->mode & SPI_TX_QUAD))
3248                                 return -EINVAL;
3249                 }
3250                 /* check transfer rx_nbits */
3251                 if (xfer->rx_buf) {
3252                         if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3253                                 xfer->rx_nbits != SPI_NBITS_DUAL &&
3254                                 xfer->rx_nbits != SPI_NBITS_QUAD)
3255                                 return -EINVAL;
3256                         if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3257                                 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3258                                 return -EINVAL;
3259                         if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3260                                 !(spi->mode & SPI_RX_QUAD))
3261                                 return -EINVAL;
3262                 }
3263 
3264                 if (xfer->word_delay_usecs < spi->word_delay_usecs)
3265                         xfer->word_delay_usecs = spi->word_delay_usecs;
3266         }
3267 
3268         message->status = -EINPROGRESS;
3269 
3270         return 0;
3271 }
3272 
3273 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3274 {
3275         struct spi_controller *ctlr = spi->controller;
3276 
3277         /*
3278          * Some controllers do not support doing regular SPI transfers. Return
3279          * ENOTSUPP when this is the case.
3280          */
3281         if (!ctlr->transfer)
3282                 return -ENOTSUPP;
3283 
3284         message->spi = spi;
3285 
3286         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3287         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3288 
3289         trace_spi_message_submit(message);
3290 
3291         return ctlr->transfer(spi, message);
3292 }
3293 
3294 /**
3295  * spi_async - asynchronous SPI transfer
3296  * @spi: device with which data will be exchanged
3297  * @message: describes the data transfers, including completion callback
3298  * Context: any (irqs may be blocked, etc)
3299  *
3300  * This call may be used in_irq and other contexts which can't sleep,
3301  * as well as from task contexts which can sleep.
3302  *
3303  * The completion callback is invoked in a context which can't sleep.
3304  * Before that invocation, the value of message->status is undefined.
3305  * When the callback is issued, message->status holds either zero (to
3306  * indicate complete success) or a negative error code.  After that
3307  * callback returns, the driver which issued the transfer request may
3308  * deallocate the associated memory; it's no longer in use by any SPI
3309  * core or controller driver code.
3310  *
3311  * Note that although all messages to a spi_device are handled in
3312  * FIFO order, messages may go to different devices in other orders.
3313  * Some device might be higher priority, or have various "hard" access
3314  * time requirements, for example.
3315  *
3316  * On detection of any fault during the transfer, processing of
3317  * the entire message is aborted, and the device is deselected.
3318  * Until returning from the associated message completion callback,
3319  * no other spi_message queued to that device will be processed.
3320  * (This rule applies equally to all the synchronous transfer calls,
3321  * which are wrappers around this core asynchronous primitive.)
3322  *
3323  * Return: zero on success, else a negative error code.
3324  */
3325 int spi_async(struct spi_device *spi, struct spi_message *message)
3326 {
3327         struct spi_controller *ctlr = spi->controller;
3328         int ret;
3329         unsigned long flags;
3330 
3331         ret = __spi_validate(spi, message);
3332         if (ret != 0)
3333                 return ret;
3334 
3335         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3336 
3337         if (ctlr->bus_lock_flag)
3338                 ret = -EBUSY;
3339         else
3340                 ret = __spi_async(spi, message);
3341 
3342         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3343 
3344         return ret;
3345 }
3346 EXPORT_SYMBOL_GPL(spi_async);
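
/*
 * Illustrative sketch of a hypothetical spi_async() caller: supply a
 * completion callback via the message, and keep the message and its buffers
 * alive until that callback has run.
 */
static void qux_msg_complete(void *context)
{
        struct completion *done = context;

        complete(done);         /* message->status is valid at this point */
}

static int qux_submit(struct spi_device *spi, struct spi_message *msg,
                      struct completion *done)
{
        msg->complete = qux_msg_complete;
        msg->context = done;

        /* returns immediately; qux_msg_complete() runs when the transfer ends */
        return spi_async(spi, msg);
}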
3347 
3348 /**
3349  * spi_async_locked - version of spi_async with exclusive bus usage
3350  * @spi: device with which data will be exchanged
3351  * @message: describes the data transfers, including completion callback
3352  * Context: any (irqs may be blocked, etc)
3353  *
3354  * This call may be used in_irq and other contexts which can't sleep,
3355  * as well as from task contexts which can sleep.
3356  *
3357  * The completion callback is invoked in a context which can't sleep.
3358  * Before that invocation, the value of message->status is undefined.
3359  * When the callback is issued, message->status holds either zero (to
3360  * indicate complete success) or a negative error code.  After that
3361  * callback returns, the driver which issued the transfer request may
3362  * deallocate the associated memory; it's no longer in use by any SPI
3363  * core or controller driver code.
3364  *
3365  * Note that although all messages to a spi_device are handled in
3366  * FIFO order, messages may go to different devices in other orders.
3367  * Some device might be higher priority, or have various "hard" access
3368  * time requirements, for example.
3369  *
3370  * On detection of any fault during the transfer, processing of
3371  * the entire message is aborted, and the device is deselected.
3372  * Until returning from the associated message completion callback,
3373  * no other spi_message queued to that device will be processed.
3374  * (This rule applies equally to all the synchronous transfer calls,
3375  * which are wrappers around this core asynchronous primitive.)
3376  *
3377  * Return: zero on success, else a negative error code.
3378  */
3379 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3380 {
3381         struct spi_controller *ctlr = spi->controller;
3382         int ret;
3383         unsigned long flags;
3384 
3385         ret = __spi_validate(spi, message);
3386         if (ret != 0)
3387                 return ret;
3388 
3389         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3390 
3391         ret = __spi_async(spi, message);
3392 
3393         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3394 
3395         return ret;
3396 
3397 }
3398 EXPORT_SYMBOL_GPL(spi_async_locked);
3399 
3400 /*-------------------------------------------------------------------------*/
3401 
3402 /* Utility methods for SPI protocol drivers, layered on
3403  * top of the core.  Some other utility methods are defined as
3404  * inline functions.
3405  */
3406 
3407 static void spi_complete(void *arg)
3408 {
3409         complete(arg);
3410 }
3411 
3412 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3413 {
3414         DECLARE_COMPLETION_ONSTACK(done);
3415         int status;
3416         struct spi_controller *ctlr = spi->controller;
3417         unsigned long flags;
3418 
3419         status = __spi_validate(spi, message);
3420         if (status != 0)
3421                 return status;
3422 
3423         message->complete = spi_complete;
3424         message->context = &done;
3425         message->spi = spi;
3426 
3427         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3428         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3429 
3430         /* If we're not using the legacy transfer method then we will
3431          * try to transfer in the calling context, so special-case that here.
3432          * This code would be less tricky if we could remove the
3433          * support for driver-implemented message queues.
3434          */
3435         if (ctlr->transfer == spi_queued_transfer) {
3436                 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3437 
3438                 trace_spi_message_submit(message);
3439 
3440                 status = __spi_queued_transfer(spi, message, false);
3441 
3442                 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3443         } else {
3444                 status = spi_async_locked(spi, message);
3445         }
3446 
3447         if (status == 0) {
3448                 /* Push out the messages in the calling context if we
3449                  * can.
3450                  */
3451                 if (ctlr->transfer == spi_queued_transfer) {
3452                         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3453                                                        spi_sync_immediate);
3454                         SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3455                                                        spi_sync_immediate);
3456                         __spi_pump_messages(ctlr, false);
3457                 }
3458 
3459                 wait_for_completion(&done);
3460                 status = message->status;
3461         }
3462         message->context = NULL;
3463         return status;
3464 }
3465 
3466 /**
3467  * spi_sync - blocking/synchronous SPI data transfers
3468  * @spi: device with which data will be exchanged
3469  * @message: describes the data transfers
3470  * Context: can sleep
3471  *
3472  * This call may only be used from a context that may sleep.  The sleep
3473  * is non-interruptible, and has no timeout.  Low-overhead controller
3474  * drivers may DMA directly into and out of the message buffers.
3475  *
3476  * Note that the SPI device's chip select is active during the message,
3477  * and then is normally disabled between messages.  Drivers for some
3478  * frequently-used devices may want to minimize costs of selecting a chip,
3479  * by leaving it selected in anticipation that the next message will go
3480  * to the same chip.  (That may increase power usage.)
3481  *
3482  * Also, the caller is guaranteeing that the memory associated with the
3483  * message will not be freed before this call returns.
3484  *
3485  * Return: zero on success, else a negative error code.
3486  */
3487 int spi_sync(struct spi_device *spi, struct spi_message *message)
3488 {
3489         int ret;
3490 
3491         mutex_lock(&spi->controller->bus_lock_mutex);
3492         ret = __spi_sync(spi, message);
3493         mutex_unlock(&spi->controller->bus_lock_mutex);
3494 
3495         return ret;
3496 }
3497 EXPORT_SYMBOL_GPL(spi_sync);
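
/*
 * Illustrative sketch of a typical spi_sync() caller: build a message from
 * transfers on the stack and wait for it.  The 0x9f opcode is an assumption
 * about the device; a production driver would also use DMA-safe buffers.
 */
static int quux_read_id(struct spi_device *spi, u8 *id, size_t len)
{
        u8 cmd = 0x9f;                  /* assumed "read ID" opcode */
        struct spi_transfer xfers[2] = {
                { .tx_buf = &cmd, .len = 1 },
                { .rx_buf = id,   .len = len },
        };
        struct spi_message msg;

        spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
        return spi_sync(spi, &msg);
}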
3498 
3499 /**
3500  * spi_sync_locked - version of spi_sync with exclusive bus usage
3501  * @spi: device with which data will be exchanged
3502  * @message: describes the data transfers
3503  * Context: can sleep
3504  *
3505  * This call may only be used from a context that may sleep.  The sleep
3506  * is non-interruptible, and has no timeout.  Low-overhead controller
3507  * drivers may DMA directly into and out of the message buffers.
3508  *
3509  * This call should be used by drivers that require exclusive access to the
3510  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3511  * be released by a spi_bus_unlock call when the exclusive access is over.
3512  *
3513  * Return: zero on success, else a negative error code.
3514  */
3515 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3516 {
3517         return __spi_sync(spi, message);
3518 }
3519 EXPORT_SYMBOL_GPL(spi_sync_locked);
3520 
3521 /**
3522  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3523  * @ctlr: SPI bus master that should be locked for exclusive bus access
3524  * Context: can sleep
3525  *
3526  * This call may only be used from a context that may sleep.  The sleep
3527  * is non-interruptible, and has no timeout.
3528  *
3529  * This call should be used by drivers that require exclusive access to the
3530  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3531  * exclusive access is over. Data transfer must be done by spi_sync_locked
3532  * and spi_async_locked calls when the SPI bus lock is held.
3533  *
3534  * Return: always zero.
3535  */
3536 int spi_bus_lock(struct spi_controller *ctlr)
3537 {
3538         unsigned long flags;
3539 
3540         mutex_lock(&ctlr->bus_lock_mutex);
3541 
3542         spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3543         ctlr->bus_lock_flag = 1;
3544         spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3545 
3546         /* mutex remains locked until spi_bus_unlock is called */
3547 
3548         return 0;
3549 }
3550 EXPORT_SYMBOL_GPL(spi_bus_lock);
3551 
3552 /**
3553  * spi_bus_unlock - release the lock for exclusive SPI bus usage
3554  * @ctlr: SPI bus master that was locked for exclusive bus access
3555  * Context: can sleep
3556  *
3557  * This call may only be used from a context that may sleep.  The sleep
3558  * is non-interruptible, and has no timeout.
3559  *
3560  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3561  * call.
3562  *
3563  * Return: always zero.
3564  */
3565 int spi_bus_unlock(struct spi_controller *ctlr)
3566 {
3567         ctlr->bus_lock_flag = 0;
3568 
3569         mutex_unlock(&ctlr->bus_lock_mutex);
3570 
3571         return 0;
3572 }
3573 EXPORT_SYMBOL_GPL(spi_bus_unlock);
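
/*
 * Illustrative sketch: a hypothetical driver that must issue several
 * messages with nothing else interleaved on the bus brackets them with
 * spi_bus_lock()/spi_bus_unlock() and uses the _locked variants in between.
 */
static int corge_atomic_sequence(struct spi_device *spi,
                                 struct spi_message *first,
                                 struct spi_message *second)
{
        int ret;

        spi_bus_lock(spi->controller);

        ret = spi_sync_locked(spi, first);
        if (!ret)
                ret = spi_sync_locked(spi, second);

        spi_bus_unlock(spi->controller);

        return ret;
}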
3574 
3575 /* portable code must never pass more than 32 bytes */
3576 #define SPI_BUFSIZ      max(32, SMP_CACHE_BYTES)
3577 
3578 static u8       *buf;
3579 
3580 /**
3581  * spi_write_then_read - SPI synchronous write followed by read
3582  * @spi: device with which data will be exchanged
3583  * @txbuf: data to be written (need not be dma-safe)
3584  * @n_tx: size of txbuf, in bytes
3585  * @rxbuf: buffer into which data will be read (need not be dma-safe)
3586  * @n_rx: size of rxbuf, in bytes
3587  * Context: can sleep
3588  *
3589  * This performs a half duplex MicroWire style transaction with the
3590  * device, sending txbuf and then reading rxbuf.  The return value
3591  * is zero for success, else a negative errno status code.
3592  * This call may only be used from a context that may sleep.
3593  *
3594  * Parameters to this routine are always copied using a small buffer;
3595  * portable code should never use this for more than 32 bytes.
3596  * Performance-sensitive or bulk transfer code should instead use
3597  * spi_{async,sync}() calls with dma-safe buffers.
3598  *
3599  * Return: zero on success, else a negative error code.
3600  */
3601 int spi_write_then_read(struct spi_device *spi,
3602                 const void *txbuf, unsigned n_tx,
3603                 void *rxbuf, unsigned n_rx)
3604 {
3605         static DEFINE_MUTEX(lock);
3606 
3607         int                     status;
3608         struct spi_message      message;
3609         struct spi_transfer     x[2];
3610         u8                      *local_buf;
3611 
3612         /* Use the preallocated DMA-safe buffer if we can.  We can't avoid
3613          * copying here (this is purely a convenience), but we can
3614          * keep heap costs out of the hot path unless someone else is
3615          * using the preallocated buffer or the transfer is too large.
3616          */
3617         if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3618                 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3619                                     GFP_KERNEL | GFP_DMA);
3620                 if (!local_buf)
3621                         return -ENOMEM;
3622         } else {
3623                 local_buf = buf;
3624         }
3625 
3626         spi_message_init(&message);
3627         memset(x, 0, sizeof(x));
3628         if (n_tx) {
3629                 x[0].len = n_tx;
3630                 spi_message_add_tail(&x[0], &message);
3631         }
3632         if (n_rx) {
3633                 x[1].len = n_rx;
3634                 spi_message_add_tail(&x[1], &message);
3635         }
3636 
3637         memcpy(local_buf, txbuf, n_tx);
3638         x[0].tx_buf = local_buf;
3639         x[1].rx_buf = local_buf + n_tx;
3640 
3641         /* do the i/o */
3642         status = spi_sync(spi, &message);
3643         if (status == 0)
3644                 memcpy(rxbuf, x[1].rx_buf, n_rx);
3645 
3646         if (x[0].tx_buf == buf)
3647                 mutex_unlock(&lock);
3648         else
3649                 kfree(local_buf);
3650 
3651         return status;
3652 }
3653 EXPORT_SYMBOL_GPL(spi_write_then_read);
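
/*
 * Illustrative sketch: reading one register through the convenience helper.
 * The register/value layout is hypothetical; the buffers need not be
 * DMA-safe because the helper copies through its own bounce buffer.
 */
static int grault_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
        return spi_write_then_read(spi, &reg, 1, val, 1);
}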
3654 
3655 /*-------------------------------------------------------------------------*/
3656 
3657 #if IS_ENABLED(CONFIG_OF)
3658 /* must call put_device() when done with the returned spi_device */
3659 struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3660 {
3661         struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
3662 
3663         return dev ? to_spi_device(dev) : NULL;
3664 }
3665 EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
3666 #endif /* IS_ENABLED(CONFIG_OF) */
3667 
3668 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
3669 /* the spi controllers are not using spi_bus, so we find them another way */
3670 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3671 {
3672         struct device *dev;
3673 
3674         dev = class_find_device_by_of_node(&spi_master_class, node);
3675         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3676                 dev = class_find_device_by_of_node(&spi_slave_class, node);
3677         if (!dev)
3678                 return NULL;
3679 
3680         /* reference got in class_find_device */
3681         return container_of(dev, struct spi_controller, dev);
3682 }
3683 
3684 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3685                          void *arg)
3686 {
3687         struct of_reconfig_data *rd = arg;
3688         struct spi_controller *ctlr;
3689         struct spi_device *spi;
3690 
3691         switch (of_reconfig_get_state_change(action, arg)) {
3692         case OF_RECONFIG_CHANGE_ADD:
3693                 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3694                 if (ctlr == NULL)
3695                         return NOTIFY_OK;       /* not for us */
3696 
3697                 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3698                         put_device(&ctlr->dev);
3699                         return NOTIFY_OK;
3700                 }
3701 
3702                 spi = of_register_spi_device(ctlr, rd->dn);
3703                 put_device(&ctlr->dev);
3704 
3705                 if (IS_ERR(spi)) {
3706                         pr_err("%s: failed to create for '%pOF'\n",
3707                                         __func__, rd->dn);
3708                         of_node_clear_flag(rd->dn, OF_POPULATED);
3709                         return notifier_from_errno(PTR_ERR(spi));
3710                 }
3711                 break;
3712 
3713         case OF_RECONFIG_CHANGE_REMOVE:
3714                 /* already depopulated? */
3715                 if (!of_node_check_flag(rd->dn, OF_POPULATED))
3716                         return NOTIFY_OK;
3717 
3718                 /* find our device by node */
3719                 spi = of_find_spi_device_by_node(rd->dn);
3720                 if (spi == NULL)
3721                         return NOTIFY_OK;       /* no? not meant for us */
3722 
3723                 /* unregister takes one ref away */
3724                 spi_unregister_device(spi);
3725 
3726                 /* and put the reference of the find */
3727                 put_device(&spi->dev);
3728                 break;
3729         }
3730 
3731         return NOTIFY_OK;
3732 }
3733 
3734 static struct notifier_block spi_of_notifier = {
3735         .notifier_call = of_spi_notify,
3736 };
3737 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3738 extern struct notifier_block spi_of_notifier;
3739 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3740 
3741 #if IS_ENABLED(CONFIG_ACPI)
3742 static int spi_acpi_controller_match(struct device *dev, const void *data)
3743 {
3744         return ACPI_COMPANION(dev->parent) == data;
3745 }
3746 
3747 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3748 {
3749         struct device *dev;
3750 
3751         dev = class_find_device(&spi_master_class, NULL, adev,
3752                                 spi_acpi_controller_match);
3753         if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3754                 dev = class_find_device(&spi_slave_class, NULL, adev,
3755                                         spi_acpi_controller_match);
3756         if (!dev)
3757                 return NULL;
3758 
3759         return container_of(dev, struct spi_controller, dev);
3760 }
3761 
3762 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3763 {
3764         struct device *dev;
3765 
3766         dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
3767         return dev ? to_spi_device(dev) : NULL;
3768 }
3769 
3770 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3771                            void *arg)
3772 {
3773         struct acpi_device *adev = arg;
3774         struct spi_controller *ctlr;
3775         struct spi_device *spi;
3776 
3777         switch (value) {
3778         case ACPI_RECONFIG_DEVICE_ADD:
3779                 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3780                 if (!ctlr)
3781                         break;
3782 
3783                 acpi_register_spi_device(ctlr, adev);
3784                 put_device(&ctlr->dev);
3785                 break;
3786         case ACPI_RECONFIG_DEVICE_REMOVE:
3787                 if (!acpi_device_enumerated(adev))
3788                         break;
3789 
3790                 spi = acpi_spi_find_device_by_adev(adev);
3791                 if (!spi)
3792                         break;
3793 
3794                 spi_unregister_device(spi);
3795                 put_device(&spi->dev);
3796                 break;
3797         }
3798 
3799         return NOTIFY_OK;
3800 }
3801 
3802 static struct notifier_block spi_acpi_notifier = {
3803         .notifier_call = acpi_spi_notify,
3804 };
3805 #else
3806 extern struct notifier_block spi_acpi_notifier;
3807 #endif
3808 
3809 static int __init spi_init(void)
3810 {
3811         int     status;
3812 
3813         buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3814         if (!buf) {
3815                 status = -ENOMEM;
3816                 goto err0;
3817         }
3818 
3819         status = bus_register(&spi_bus_type);
3820         if (status < 0)
3821                 goto err1;
3822 
3823         status = class_register(&spi_master_class);
3824         if (status < 0)
3825                 goto err2;
3826 
3827         if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3828                 status = class_register(&spi_slave_class);
3829                 if (status < 0)
3830                         goto err3;
3831         }
3832 
3833         if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3834                 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3835         if (IS_ENABLED(CONFIG_ACPI))
3836                 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3837 
3838         return 0;
3839 
3840 err3:
3841         class_unregister(&spi_master_class);
3842 err2:
3843         bus_unregister(&spi_bus_type);
3844 err1:
3845         kfree(buf);
3846         buf = NULL;
3847 err0:
3848         return status;
3849 }
3850 
3851 /* board_info is normally registered in arch_initcall(),
3852  * but even essential drivers wait till later.
3853  *
3854  * REVISIT: only boardinfo really needs static linking.  The rest (device and
3855  * driver registration) _could_ be dynamically linked (modular) ... costs
3856  * include needing to have boardinfo data structures be much more public.
3857  */
3858 postcore_initcall(spi_init);
