/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(to_spi_device(dev));
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */
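
/*
 * A sketch of the board-file registration described above, using
 * spi_register_board_info() (defined later in this file).  The chip name,
 * bus number and values here are hypothetical, purely to show the shape:
 *
 *	static struct spi_board_info fooboard_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "fooserial",
 *			.max_speed_hz	= 10000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	static int __init fooboard_spi_init(void)
 *	{
 *		return spi_register_board_info(fooboard_spi_devs,
 *					       ARRAY_SIZE(fooboard_spi_devs));
 *	}
 *	arch_initcall(fooboard_spi_init);
 *
 * The descriptors are copied, so __initdata is safe; embedded pointers
 * such as platform_data are copied as-is and must stay valid.
 */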

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
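
/*
 * A sketch of the out-of-band case mentioned in the spi_new_device()
 * kerneldoc above: an adapter driver instantiating a device it learned
 * about on its own.  The "fooadc" chip and the adapter structure are
 * hypothetical:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "fooadc",
 *		.max_speed_hz	= 2000000,
 *		.chip_select	= 0,
 *	};
 *	struct spi_device *dev;
 *
 *	dev = spi_new_device(adapter->master, &chip);
 *	if (!dev)
 *		return -ENODEV;
 *
 * Note the NULL-or-pointer return convention: on failure, the reason has
 * already been logged by the core.
 */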

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ?
			     PAGE_SIZE : master->max_dma_len;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {
		min = min_t(size_t, len, desc_len);

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;

		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
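
/*
 * A sketch of how an interrupt-driven controller driver pairs
 * transfer_one() with spi_finalize_current_transfer().  Driver names are
 * hypothetical; the key point is that transfer_one() returns a positive
 * value, so the core waits on master->xfer_completion (with the timeout
 * computed in spi_transfer_one_message() above) until the IRQ handler
 * signals completion:
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_transfer(master, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		if (foo_transfer_done(master)) {
 *			spi_finalize_current_transfer(master);
 *			return IRQ_HANDLED;
 *		}
 *		return IRQ_NONE;
 *	}
 */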

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread.  Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message.  Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled and queued to the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* IRQ */
	spi->irq = irq_of_parse_and_map(nc, 0);

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev: device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register an SPI master as with spi_register_master(); the master will
 * automatically be unregistered when @dev is unbound.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
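
/*
 * A sketch of the tail end of a typical controller probe(), combining
 * spi_alloc_master() with the managed registration above.  Everything
 * "foo" is hypothetical:
 *
 *	static int foo_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		struct foo_spi *hw;
 *		int ret;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(*hw));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		hw = spi_master_get_devdata(master);
 *		master->bus_num = pdev->id;
 *		master->num_chipselect = 4;
 *		master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		master->transfer_one = foo_transfer_one;
 *		master->dev.of_node = pdev->dev.of_node;
 *
 *		ret = devm_spi_register_master(&pdev->dev, master);
 *		if (ret)
 *			spi_master_put(master);
 *		return ret;
 *	}
 *
 * Note the spi_master_put() on the error path, per the spi_alloc_master()
 * kerneldoc; on success, unregistration happens automatically on unbind.
 */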

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status = 0;

	/* check mode to prevent DUAL and QUAD being set at the same time */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	spi_set_cs(spi, false);

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
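
/*
 * A sketch of a protocol driver tweaking its device in probe() before
 * first use, as described in the spi_setup() kerneldoc above.  The driver
 * name and values are hypothetical:
 *
 *	static int fooserial_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 5000000;
 *		return spi_setup(spi);
 *	}
 *
 * If the master can't satisfy the requested mode bits, spi_setup() fails
 * cleanly and (here) the probe fails with it.
 */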

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
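
/*
 * A sketch of issuing an asynchronous message with a completion callback;
 * the callback then runs in a context that must not sleep.  The "foo"
 * structure and names are hypothetical:
 *
 *	static void foo_msg_complete(void *context)
 *	{
 *		struct foo_chip *chip = context;
 *
 *		complete(&chip->done);
 *	}
 *
 *	spi_message_init(&chip->msg);
 *	spi_message_add_tail(&chip->xfer, &chip->msg);
 *	chip->msg.complete = foo_msg_complete;
 *	chip->msg.context = chip;
 *	ret = spi_async(spi, &chip->msg);
 *
 * The message and its buffers must stay allocated until the callback has
 * run; only then does the core stop using them.
 */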

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context, so special-case it
	 * here. This code would be less tricky if we could remove the
	 * support for driver-implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer)
			__spi_pump_messages(master, false);

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize the cost of selecting
 * a chip by leaving it selected in anticipation that the next message
 * will go to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
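
/* Illustrative sketch (not part of the core): a minimal spi_sync()
 * user doing a full-duplex transfer from a sleepable context. As the
 * guarantee above requires, both buffers stay valid for the duration
 * of the call; they are on the stack here for brevity, but controllers
 * that DMA directly into the message buffers need dma-safe memory
 * instead. The function name and wire format are hypothetical.
 */
static int __maybe_unused example_full_duplex(struct spi_device *spi,
					      u8 cmd, u8 *result)
{
	u8 tx[2] = { cmd, 0 };
	u8 rx[2] = { 0, 0 };
	struct spi_transfer xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= 2,
	};
	struct spi_message msg;
	int status;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	status = spi_sync(spi, &msg);
	if (status == 0)
		*result = rx[1];
	return status;
}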

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
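
/* Illustrative sketch (not part of the core): the lock/transfer/unlock
 * sequence described above, keeping other users of the same bus from
 * getting in between the two messages. The function name is
 * hypothetical, and @m1/@m2 are assumed to be fully built spi_messages.
 */
static int __maybe_unused example_atomic_pair(struct spi_device *spi,
					      struct spi_message *m1,
					      struct spi_message *m2)
{
	struct spi_master *master = spi->master;
	int status;

	spi_bus_lock(master);

	/* only the _locked calls may be used while the lock is held */
	status = spi_sync_locked(spi, m1);
	if (status == 0)
		status = spi_sync_locked(spi, m2);

	spi_bus_unlock(master);

	return status;
}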

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/* Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (this is purely a convenience API), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the preallocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
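
/* Illustrative sketch (not part of the core): reading one register via
 * the convenience call above. Stack buffers are fine here precisely
 * because spi_write_then_read() bounces through its own dma-safe
 * buffer; just keep n_tx + n_rx under 32 bytes for portability. The
 * function name and the "read" command bit are hypothetical.
 */
static int __maybe_unused example_read_reg(struct spi_device *spi,
					   u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;	/* hypothetical read-bit encoding */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}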

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* the spi masters are not on spi_bus, so we look them up another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference taken by the find above */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);