This source file includes the following definitions:
- sysfs_deprecated_setup
- device_links_write_lock
- device_links_write_unlock
- device_links_read_lock
- device_links_read_unlock
- device_links_read_lock_held
- device_links_write_lock
- device_links_write_unlock
- device_links_read_lock
- device_links_read_unlock
- device_links_read_lock_held
- device_is_dependent
- device_link_init_status
- device_reorder_to_tail
- device_pm_move_to_tail
- device_link_add
- device_link_free
- __device_link_free_srcu
- __device_link_del
- __device_link_del
- device_link_put_kref
- device_link_del
- device_link_remove
- device_links_missing_supplier
- device_links_check_suppliers
- device_links_driver_bound
- device_link_drop_managed
- __device_links_no_driver
- device_links_no_driver
- device_links_driver_cleanup
- device_links_busy
- device_links_unbind_consumers
- device_links_purge
- lock_device_hotplug
- unlock_device_hotplug
- lock_device_hotplug_sysfs
- device_is_not_partition
- device_is_not_partition
- device_platform_notify
- dev_driver_string
- dev_attr_show
- dev_attr_store
- device_store_ulong
- device_show_ulong
- device_store_int
- device_show_int
- device_store_bool
- device_show_bool
- device_release
- device_namespace
- device_get_ownership
- dev_uevent_filter
- dev_uevent_name
- dev_uevent
- uevent_show
- uevent_store
- online_show
- online_store
- device_add_groups
- device_remove_groups
- devm_attr_group_match
- devm_attr_group_remove
- devm_attr_groups_remove
- devm_device_add_group
- devm_device_remove_group
- devm_device_add_groups
- devm_device_remove_groups
- device_add_attrs
- device_remove_attrs
- dev_show
- devices_kset_move_before
- devices_kset_move_after
- devices_kset_move_last
- device_create_file
- device_remove_file
- device_remove_file_self
- device_create_bin_file
- device_remove_bin_file
- klist_children_get
- klist_children_put
- device_initialize
- virtual_device_parent
- class_dir_release
- class_dir_child_ns_type
- class_dir_create_and_add
- get_device_parent
- live_in_glue_dir
- get_glue_dir
- cleanup_glue_dir
- device_add_class_symlinks
- device_remove_class_symlinks
- dev_set_name
- device_to_dev_kobj
- device_create_sys_dev_entry
- device_remove_sys_dev_entry
- device_private_init
- device_add
- device_register
- get_device
- put_device
- kill_device
- device_del
- device_unregister
- prev_device
- next_device
- device_get_devnode
- device_for_each_child
- device_for_each_child_reverse
- device_find_child
- device_find_child_by_name
- devices_init
- device_check_offline
- device_offline
- device_online
- to_root_device
- root_device_release
- __root_device_register
- root_device_unregister
- device_create_release
- device_create_groups_vargs
- device_create_vargs
- device_create
- device_create_with_groups
- device_destroy
- device_rename
- device_move_class_links
- device_move
- device_shutdown
- create_syslog_header
- dev_vprintk_emit
- dev_printk_emit
- __dev_printk
- dev_printk
- fwnode_is_primary
- set_primary_fwnode
- set_secondary_fwnode
- device_set_of_node_from_dev
- device_match_name
- device_match_of_node
- device_match_fwnode
- device_match_devt
- device_match_acpi_dev
- device_match_any
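
Most of the definitions above fall into three groups: the device-links support (device_link_add() and related helpers), the driver-core sysfs plumbing (attributes, uevents, class symlinks), and device registration (device_initialize(), device_add(), device_del()). As a quick orientation, here is a minimal sketch of how a consumer driver might use the device-link API defined below; consumer_probe(), consumer_dev and supplier_dev are hypothetical names used only for illustration.

        /* Hypothetical consumer probe path (illustration, not part of this file). */
        static int consumer_probe(struct device *consumer_dev,
                                  struct device *supplier_dev)
        {
                struct device_link *link;

                /* Managed link: the driver core drops it when the consumer unbinds. */
                link = device_link_add(consumer_dev, supplier_dev,
                                       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
                if (!link)
                        return -ENODEV;

                return 0;
        }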
   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * drivers/base/core.c - core driver model code (device registration, etc)
   4  *
   5  * Copyright (c) 2002-3 Patrick Mochel
   6  * Copyright (c) 2002-3 Open Source Development Labs
   7  * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
   8  * Copyright (c) 2006 Novell, Inc.
   9  */
  10 
  11 #include <linux/acpi.h>
  12 #include <linux/cpufreq.h>
  13 #include <linux/device.h>
  14 #include <linux/err.h>
  15 #include <linux/fwnode.h>
  16 #include <linux/init.h>
  17 #include <linux/module.h>
  18 #include <linux/slab.h>
  19 #include <linux/string.h>
  20 #include <linux/kdev_t.h>
  21 #include <linux/notifier.h>
  22 #include <linux/of.h>
  23 #include <linux/of_device.h>
  24 #include <linux/genhd.h>
  25 #include <linux/mutex.h>
  26 #include <linux/pm_runtime.h>
  27 #include <linux/netdevice.h>
  28 #include <linux/sched/signal.h>
  29 #include <linux/sysfs.h>
  30 
  31 #include "base.h"
  32 #include "power/power.h"
  33 
  34 #ifdef CONFIG_SYSFS_DEPRECATED
  35 #ifdef CONFIG_SYSFS_DEPRECATED_V2
  36 long sysfs_deprecated = 1;
  37 #else
  38 long sysfs_deprecated = 0;
  39 #endif
  40 static int __init sysfs_deprecated_setup(char *arg)
  41 {
  42         return kstrtol(arg, 10, &sysfs_deprecated);
  43 }
  44 early_param("sysfs.deprecated", sysfs_deprecated_setup);
  45 #endif
  46 
  47 /* Device links support. */
  48 
  49 #ifdef CONFIG_SRCU
  50 static DEFINE_MUTEX(device_links_lock);
  51 DEFINE_STATIC_SRCU(device_links_srcu);
  52 
  53 static inline void device_links_write_lock(void)
  54 {
  55         mutex_lock(&device_links_lock);
  56 }
  57 
  58 static inline void device_links_write_unlock(void)
  59 {
  60         mutex_unlock(&device_links_lock);
  61 }
  62 
  63 int device_links_read_lock(void)
  64 {
  65         return srcu_read_lock(&device_links_srcu);
  66 }
  67 
  68 void device_links_read_unlock(int idx)
  69 {
  70         srcu_read_unlock(&device_links_srcu, idx);
  71 }
  72 
  73 int device_links_read_lock_held(void)
  74 {
  75         return srcu_read_lock_held(&device_links_srcu);
  76 }
  77 #else /* !CONFIG_SRCU */
  78 static DECLARE_RWSEM(device_links_lock);
  79 
  80 static inline void device_links_write_lock(void)
  81 {
  82         down_write(&device_links_lock);
  83 }
  84 
  85 static inline void device_links_write_unlock(void)
  86 {
  87         up_write(&device_links_lock);
  88 }
  89 
  90 int device_links_read_lock(void)
  91 {
  92         down_read(&device_links_lock);
  93         return 0;
  94 }
  95 
  96 void device_links_read_unlock(int not_used)
  97 {
  98         up_read(&device_links_lock);
  99 }
 100 
 101 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 102 int device_links_read_lock_held(void)
 103 {
 104         return lockdep_is_held(&device_links_lock);
 105 }
 106 #endif
 107 #endif /* !CONFIG_SRCU */
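
/*
 * In both configurations, readers walk the per-device link lists inside a
 * device_links_read_lock()/device_links_read_unlock() section, while every
 * update to those lists is serialized by device_links_write_lock().
 */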
 108 
 109 /**
 110  * device_is_dependent - Check if one device depends on another one
 111  * @dev: Device to check dependencies for.
 112  * @target: Device to check against.
 113  *
 114  * Check if @target depends on @dev or any device dependent on it (its child
 115  * or its consumer etc).  Return 1 if that is the case or 0 otherwise.
 116  */
 117 static int device_is_dependent(struct device *dev, void *target)
 118 {
 119         struct device_link *link;
 120         int ret;
 121 
 122         if (dev == target)
 123                 return 1;
 124 
 125         ret = device_for_each_child(dev, target, device_is_dependent);
 126         if (ret)
 127                 return ret;
 128 
 129         list_for_each_entry(link, &dev->links.consumers, s_node) {
 130                 if (link->consumer == target)
 131                         return 1;
 132 
 133                 ret = device_is_dependent(link->consumer, target);
 134                 if (ret)
 135                         break;
 136         }
 137         return ret;
 138 }
 139 
 140 static void device_link_init_status(struct device_link *link,
 141                                     struct device *consumer,
 142                                     struct device *supplier)
 143 {
 144         switch (supplier->links.status) {
 145         case DL_DEV_PROBING:
 146                 switch (consumer->links.status) {
 147                 case DL_DEV_PROBING:
 148                         /*
 149                          * A consumer driver can create a link to a
 150                          * supplier that has not completed its probing
 151                          * yet as long as it knows that the supplier is
 152                          * already functional (for example, it has just
 153                          * acquired some resources from the supplier).
 154                          */
 155                         link->status = DL_STATE_CONSUMER_PROBE;
 156                         break;
 157                 default:
 158                         link->status = DL_STATE_DORMANT;
 159                         break;
 160                 }
 161                 break;
 162         case DL_DEV_DRIVER_BOUND:
 163                 switch (consumer->links.status) {
 164                 case DL_DEV_PROBING:
 165                         link->status = DL_STATE_CONSUMER_PROBE;
 166                         break;
 167                 case DL_DEV_DRIVER_BOUND:
 168                         link->status = DL_STATE_ACTIVE;
 169                         break;
 170                 default:
 171                         link->status = DL_STATE_AVAILABLE;
 172                         break;
 173                 }
 174                 break;
 175         case DL_DEV_UNBINDING:
 176                 link->status = DL_STATE_SUPPLIER_UNBIND;
 177                 break;
 178         default:
 179                 link->status = DL_STATE_DORMANT;
 180                 break;
 181         }
 182 }
 183 
 184 static int device_reorder_to_tail(struct device *dev, void *not_used)
 185 {
 186         struct device_link *link;
 187 
 188         /*
 189          * Devices that have not been registered yet will be put to the
 190          * ends of the lists during the registration, so skip them here.
 191          */
 192         if (device_is_registered(dev))
 193                 devices_kset_move_last(dev);
 194 
 195         if (device_pm_initialized(dev))
 196                 device_pm_move_last(dev);
 197 
 198         device_for_each_child(dev, NULL, device_reorder_to_tail);
 199         list_for_each_entry(link, &dev->links.consumers, s_node)
 200                 device_reorder_to_tail(link->consumer, NULL);
 201 
 202         return 0;
 203 }
 204 
 205 /**
 206  * device_pm_move_to_tail - Move set of devices to the end of device lists
 207  * @dev: Device to move
 208  *
 209  * This is a device_reorder_to_tail() wrapper taking the requisite locks.
 210  *
 211  * It moves the @dev along with all of its children and all of its consumers
 212  * to the ends of the device_kset and dpm_list, recursively.
 213  */
 214 void device_pm_move_to_tail(struct device *dev)
 215 {
 216         int idx;
 217 
 218         idx = device_links_read_lock();
 219         device_pm_lock();
 220         device_reorder_to_tail(dev, NULL);
 221         device_pm_unlock();
 222         device_links_read_unlock(idx);
 223 }
 224 
 225 #define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
 226                                DL_FLAG_AUTOREMOVE_SUPPLIER | \
 227                                DL_FLAG_AUTOPROBE_CONSUMER)
 228 
 229 #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
 230                             DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
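
/*
 * The DL_MANAGED_LINK_FLAGS only make sense for links whose lifetime is
 * managed by the driver core, so device_link_add() rejects them in
 * combination with DL_FLAG_STATELESS; DL_ADD_VALID_FLAGS is the complete
 * set of flags accepted from callers.
 */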
 231 
 232 /**
 233  * device_link_add - Create a link between two devices.
 234  * @consumer: Consumer end of the link.
 235  * @supplier: Supplier end of the link.
 236  * @flags: Link flags.
 237  *
 238  * The caller is responsible for the proper synchronization of the link
 239  * creation with runtime PM.  First, setting the DL_FLAG_PM_RUNTIME flag will
 240  * cause the runtime PM framework to take the link into account.  Second, if
 241  * the DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier device
 242  * will be forced into the active metastate and reference-counted upon the
 243  * creation of the link.  If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE
 244  * will be ignored.
 245  *
 246  * If DL_FLAG_STATELESS is set in @flags, the caller is expected to release
 247  * the returned link with either device_link_del() or device_link_remove().
 248  *
 249  * If that flag is not set, however, the caller of this function is handing
 250  * the management of the link over to the driver core entirely and its return
 251  * value can only be used to check whether or not the link is present.  In
 252  * that case, the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER
 253  * device link flags can be used to indicate to the driver core when the link
 254  * can be safely deleted.  Namely, setting one of them in @flags indicates to
 255  * the driver core that the link is not going to be used (by the given caller
 256  * of this function) after unbinding the consumer or supplier driver,
 257  * respectively, from its device, so the link can be deleted at that point.
 258  * If none of them is set, the link will be maintained until one of the
 259  * devices pointed to by it (either the consumer or the supplier) is
 260  * unregistered.
 261  *
 262  * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
 263  * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
 264  * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag
 265  * can be used to request the driver core to automatically probe for a
 266  * consumer driver after successfully binding a driver to the supplier device.
 267  *
 268  * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
 269  * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
 270  * the same time is invalid and will cause NULL to be returned upfront.
 271  * However, if a device link between the given @consumer and @supplier pair
 272  * exists already when this function is called for them, the existing link
 273  * will be returned regardless of its current type and status (the link's
 274  * flags may be modified then).  The caller of this function is then expected
 275  * to treat the link as though it has just been created, so (in particular)
 276  * if DL_FLAG_STATELESS was passed in @flags, the link needs to be released
 277  * eventually.
 278  *
 279  * A side effect of the link creation is re-ordering of dpm_list and the
 280  * devices_kset list by moving the consumer device and all devices depending
 281  * on it to the ends of these lists (that does not happen to devices that
 282  * have not been registered when this function is called).
 283  *
 284  * The supplier device is required to be registered when this function is
 285  * called and NULL will be returned if that is not the case.  The consumer
 286  * device need not be registered, however.
 287  */
 288 struct device_link *device_link_add(struct device *consumer,
 289                                     struct device *supplier, u32 flags)
 290 {
 291         struct device_link *link;
 292 
 293         if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
 294             (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
 295             (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
 296              flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
 297                       DL_FLAG_AUTOREMOVE_SUPPLIER)))
 298                 return NULL;
 299 
 300         if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
 301                 if (pm_runtime_get_sync(supplier) < 0) {
 302                         pm_runtime_put_noidle(supplier);
 303                         return NULL;
 304                 }
 305         }
 306 
 307         if (!(flags & DL_FLAG_STATELESS))
 308                 flags |= DL_FLAG_MANAGED;
 309 
 310         device_links_write_lock();
 311         device_pm_lock();
 312 
 313         /*
 314          * If the supplier has not been fully registered yet or there is a
 315          * reverse dependency between the consumer and the supplier already
 316          * in the graph, return NULL.
 317          */
 318         if (!device_pm_initialized(supplier)
 319             || device_is_dependent(consumer, supplier)) {
 320                 link = NULL;
 321                 goto out;
 322         }
 323 
 324         /*
 325          * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be
 326          * needed longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting
 327          * them both doesn't make sense, so prefer the former.
 328          */
 329         if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
 330                 flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
 331 
 332         list_for_each_entry(link, &supplier->links.consumers, s_node) {
 333                 if (link->consumer != consumer)
 334                         continue;
 335 
 336                 if (flags & DL_FLAG_PM_RUNTIME) {
 337                         if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
 338                                 pm_runtime_new_link(consumer);
 339                                 link->flags |= DL_FLAG_PM_RUNTIME;
 340                         }
 341                         if (flags & DL_FLAG_RPM_ACTIVE)
 342                                 refcount_inc(&link->rpm_active);
 343                 }
 344 
 345                 if (flags & DL_FLAG_STATELESS) {
 346                         link->flags |= DL_FLAG_STATELESS;
 347                         kref_get(&link->kref);
 348                         goto out;
 349                 }
 350 
 351                 /*
 352                  * If the life time of the link following from the new
 353                  * flags is longer than indicated by the flags of the
 354                  * existing link, update the existing link accordingly.
 355                  */
 356                 if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
 357                         if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
 358                                 link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
 359                                 link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
 360                         }
 361                 } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
 362                         link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
 363                                          DL_FLAG_AUTOREMOVE_SUPPLIER);
 364                 }
 365                 if (!(link->flags & DL_FLAG_MANAGED)) {
 366                         kref_get(&link->kref);
 367                         link->flags |= DL_FLAG_MANAGED;
 368                         device_link_init_status(link, consumer, supplier);
 369                 }
 370                 goto out;
 371         }
 372 
 373         link = kzalloc(sizeof(*link), GFP_KERNEL);
 374         if (!link)
 375                 goto out;
 376 
 377         refcount_set(&link->rpm_active, 1);
 378 
 379         if (flags & DL_FLAG_PM_RUNTIME) {
 380                 if (flags & DL_FLAG_RPM_ACTIVE)
 381                         refcount_inc(&link->rpm_active);
 382 
 383                 pm_runtime_new_link(consumer);
 384         }
 385 
 386         get_device(supplier);
 387         link->supplier = supplier;
 388         INIT_LIST_HEAD(&link->s_node);
 389         get_device(consumer);
 390         link->consumer = consumer;
 391         INIT_LIST_HEAD(&link->c_node);
 392         link->flags = flags;
 393         kref_init(&link->kref);
 394 
 395         /* Determine the initial link state. */
 396         if (flags & DL_FLAG_STATELESS)
 397                 link->status = DL_STATE_NONE;
 398         else
 399                 device_link_init_status(link, consumer, supplier);
 400 
 401         /*
 402          * Some callers expect the link creation during consumer driver
 403          * probe to resume the supplier even without DL_FLAG_RPM_ACTIVE.
 404          */
 405         if (link->status == DL_STATE_CONSUMER_PROBE &&
 406             flags & DL_FLAG_PM_RUNTIME)
 407                 pm_runtime_resume(supplier);
 408 
 409         /*
 410          * Move the consumer and all of the devices depending on it to the
 411          * end of dpm_list and the devices_kset list.
 412          *
 413          * It is necessary to hold dpm_list locked throughout all that or
 414          * else we may end up suspending with a wrong ordering of it.
 415          */
 416         device_reorder_to_tail(consumer, NULL);
 417 
 418         list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
 419         list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
 420 
 421         dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
 422 
 423  out:
 424         device_pm_unlock();
 425         device_links_write_unlock();
 426 
 427         if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
 428                 pm_runtime_put(supplier);
 429 
 430         return link;
 431 }
 432 EXPORT_SYMBOL_GPL(device_link_add);
 433 
 434 static void device_link_free(struct device_link *link)
 435 {
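        /*
         * rpm_active is biased by one, so this drops every runtime-PM
         * reference the link still holds on the supplier and stops at one.
         */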
 436         while (refcount_dec_not_one(&link->rpm_active))
 437                 pm_runtime_put(link->supplier);
 438 
 439         put_device(link->consumer);
 440         put_device(link->supplier);
 441         kfree(link);
 442 }
 443 
 444 #ifdef CONFIG_SRCU
 445 static void __device_link_free_srcu(struct rcu_head *rhead)
 446 {
 447         device_link_free(container_of(rhead, struct device_link, rcu_head));
 448 }
 449 
 450 static void __device_link_del(struct kref *kref)
 451 {
 452         struct device_link *link = container_of(kref, struct device_link, kref);
 453 
 454         dev_dbg(link->consumer, "Dropping the link to %s\n",
 455                 dev_name(link->supplier));
 456 
 457         if (link->flags & DL_FLAG_PM_RUNTIME)
 458                 pm_runtime_drop_link(link->consumer);
 459 
 460         list_del_rcu(&link->s_node);
 461         list_del_rcu(&link->c_node);
 462         call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
 463 }
 464 #else /* !CONFIG_SRCU */
 465 static void __device_link_del(struct kref *kref)
 466 {
 467         struct device_link *link = container_of(kref, struct device_link, kref);
 468 
 469         dev_info(link->consumer, "Dropping the link to %s\n",
 470                  dev_name(link->supplier));
 471 
 472         if (link->flags & DL_FLAG_PM_RUNTIME)
 473                 pm_runtime_drop_link(link->consumer);
 474 
 475         list_del(&link->s_node);
 476         list_del(&link->c_node);
 477         device_link_free(link);
 478 }
 479 #endif /* !CONFIG_SRCU */
 480 
 481 static void device_link_put_kref(struct device_link *link)
 482 {
 483         if (link->flags & DL_FLAG_STATELESS)
 484                 kref_put(&link->kref, __device_link_del);
 485         else
 486                 WARN(1, "Unable to drop a managed device link reference\n");
 487 }
 488 
 489 /**
 490  * device_link_del - Delete a link between two devices.
 491  * @link: Device link to delete.
 492  *
 493  * The caller must ensure proper synchronization of this function with
 494  * runtime PM.  If the link was added multiple times, it needs to be
 495  * deleted as often.  Care is required for hotplugged devices: their links
 496  * are purged when they are physically removed.
 497  */
 498 void device_link_del(struct device_link *link)
 499 {
 500         device_links_write_lock();
 501         device_pm_lock();
 502         device_link_put_kref(link);
 503         device_pm_unlock();
 504         device_links_write_unlock();
 505 }
 506 EXPORT_SYMBOL_GPL(device_link_del);
 507 
 508 /**
 509  * device_link_remove - Delete a stateless link between two devices.
 510  * @consumer: Consumer end of the link.
 511  * @supplier: Supplier end of the link.
 512  *
 513  * The caller must ensure proper synchronization of this function with
 514  * runtime PM.
 515  */
 516 void device_link_remove(void *consumer, struct device *supplier)
 517 {
 518         struct device_link *link;
 519 
 520         if (WARN_ON(consumer == supplier))
 521                 return;
 522 
 523         device_links_write_lock();
 524         device_pm_lock();
 525 
 526         list_for_each_entry(link, &supplier->links.consumers, s_node) {
 527                 if (link->consumer == consumer) {
 528                         device_link_put_kref(link);
 529                         break;
 530                 }
 531         }
 532 
 533         device_pm_unlock();
 534         device_links_write_unlock();
 535 }
 536 EXPORT_SYMBOL_GPL(device_link_remove);
 537 
 538 static void device_links_missing_supplier(struct device *dev)
 539 {
 540         struct device_link *link;
 541 
 542         list_for_each_entry(link, &dev->links.suppliers, c_node)
 543                 if (link->status == DL_STATE_CONSUMER_PROBE)
 544                         WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 545 }
 546 
 547 /**
 548  * device_links_check_suppliers - Check presence of supplier drivers.
 549  * @dev: Consumer device.
 550  *
 551  * Check links from this device to any suppliers.  Walk the list of the
 552  * device's links to suppliers and see if all of them are available.  If
 553  * not, simply return -EPROBE_DEFER.
 554  *
 555  * We need to guarantee that the supplier will not go away after the check
 556  * has been positive here.  It only can go away in __device_release_driver()
 557  * and that function checks the device's links to consumers.  This means we
 558  * need to mark the link as "consumer probe in progress" to make the
 559  * supplier removal wait for us to complete (or bad things may happen).
 560  *
 561  * Links without the DL_FLAG_MANAGED flag set are ignored.
 562  */
 563 int device_links_check_suppliers(struct device *dev)
 564 {
 565         struct device_link *link;
 566         int ret = 0;
 567 
 568         device_links_write_lock();
 569 
 570         list_for_each_entry(link, &dev->links.suppliers, c_node) {
 571                 if (!(link->flags & DL_FLAG_MANAGED))
 572                         continue;
 573 
 574                 if (link->status != DL_STATE_AVAILABLE) {
 575                         device_links_missing_supplier(dev);
 576                         ret = -EPROBE_DEFER;
 577                         break;
 578                 }
 579                 WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
 580         }
 581         dev->links.status = DL_DEV_PROBING;
 582 
 583         device_links_write_unlock();
 584         return ret;
 585 }
 586 
 587 /**
 588  * device_links_driver_bound - Update device links after probing its driver.
 589  * @dev: Device to update the links for.
 590  *
 591  * The probe has been successful, so update links from this device to any
 592  * consumers by changing their status to "available".
 593  *
 594  * Also change the status of @dev's links to suppliers to "active".
 595  *
 596  * Links without the DL_FLAG_MANAGED flag set are ignored.
 597  */
 598 void device_links_driver_bound(struct device *dev)
 599 {
 600         struct device_link *link;
 601 
 602         device_links_write_lock();
 603 
 604         list_for_each_entry(link, &dev->links.consumers, s_node) {
 605                 if (!(link->flags & DL_FLAG_MANAGED))
 606                         continue;
 607 
 608                 /*
 609                  * Links created during consumer probe may be in the
 610                  * "consumer probe" state to start with if the supplier is
 611                  * still probing when they are created and they may become
 612                  * "active" if the consumer probe returns first.  Skip them.
 613                  */
 614                 if (link->status == DL_STATE_CONSUMER_PROBE ||
 615                     link->status == DL_STATE_ACTIVE)
 616                         continue;
 617 
 618                 WARN_ON(link->status != DL_STATE_DORMANT);
 619                 WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 620 
 621                 if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
 622                         driver_deferred_probe_add(link->consumer);
 623         }
 624 
 625         list_for_each_entry(link, &dev->links.suppliers, c_node) {
 626                 if (!(link->flags & DL_FLAG_MANAGED))
 627                         continue;
 628 
 629                 WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
 630                 WRITE_ONCE(link->status, DL_STATE_ACTIVE);
 631         }
 632 
 633         dev->links.status = DL_DEV_DRIVER_BOUND;
 634 
 635         device_links_write_unlock();
 636 }
 637 
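/*
 * Stop managing the link: clear DL_FLAG_MANAGED and drop the reference the
 * driver core was holding for it, which normally deletes the link unless
 * stateless users still hold references of their own.
 */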
 638 static void device_link_drop_managed(struct device_link *link)
 639 {
 640         link->flags &= ~DL_FLAG_MANAGED;
 641         WRITE_ONCE(link->status, DL_STATE_NONE);
 642         kref_put(&link->kref, __device_link_del);
 643 }
 644 
 645 /*
 646  * __device_links_no_driver - Update links of a device without a driver.
 647  * @dev: Device without a driver.
 648  *
 649  * Delete all non-persistent links from this device to any suppliers.
 650  *
 651  * Persistent links stay around, but their status is changed to "available",
 652  * unless they already are in the "supplier unbind in progress" state in
 653  * which case they need not be updated.
 654  *
 655  * Links without the DL_FLAG_MANAGED flag set are ignored.
 656  */
 657 static void __device_links_no_driver(struct device *dev)
 658 {
 659         struct device_link *link, *ln;
 660 
 661         list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
 662                 if (!(link->flags & DL_FLAG_MANAGED))
 663                         continue;
 664 
 665                 if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
 666                         device_link_drop_managed(link);
 667                 else if (link->status == DL_STATE_CONSUMER_PROBE ||
 668                          link->status == DL_STATE_ACTIVE)
 669                         WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 670         }
 671 
 672         dev->links.status = DL_DEV_NO_DRIVER;
 673 }
 674 
 675 /**
 676  * device_links_no_driver - Update links after failing driver probe.
 677  * @dev: Device whose driver has just failed to probe.
 678  *
 679  * Clean up leftover links to consumers for @dev and invoke
 680  * __device_links_no_driver() to update links to suppliers for it as
 681  * appropriate.
 682  *
 683  * Links without the DL_FLAG_MANAGED flag set are ignored.
 684  */
 685 void device_links_no_driver(struct device *dev)
 686 {
 687         struct device_link *link;
 688 
 689         device_links_write_lock();
 690 
 691         list_for_each_entry(link, &dev->links.consumers, s_node) {
 692                 if (!(link->flags & DL_FLAG_MANAGED))
 693                         continue;
 694 
 695                 /*
 696                  * The probe has failed, so if the status of the link is
 697                  * "consumer probe" or "active", it must have been added by
 698                  * a probing consumer while this device was still probing.
 699                  * Change its state to "dormant", as it represents a valid
 700                  * relationship, but it is not functionally meaningful.
 701                  */
 702                 if (link->status == DL_STATE_CONSUMER_PROBE ||
 703                     link->status == DL_STATE_ACTIVE)
 704                         WRITE_ONCE(link->status, DL_STATE_DORMANT);
 705         }
 706 
 707         __device_links_no_driver(dev);
 708 
 709         device_links_write_unlock();
 710 }
 711 
 712 /**
 713  * device_links_driver_cleanup - Delete links of a driverless device.
 714  * @dev: Device without a driver.
 715  *
 716  * Delete all links to consumers of this device with the
 717  * DL_FLAG_AUTOREMOVE_SUPPLIER flag set and change the status of the
 718  * remaining links to it to "dormant".
 719  *
 720  * Links without the DL_FLAG_MANAGED flag set are ignored.
 721  */
 722 void device_links_driver_cleanup(struct device *dev)
 723 {
 724         struct device_link *link, *ln;
 725 
 726         device_links_write_lock();
 727 
 728         list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
 729                 if (!(link->flags & DL_FLAG_MANAGED))
 730                         continue;
 731 
 732                 WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
 733                 WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
 734 
 735                 /*
 736                  * autoremove the links between this @dev and its consumer
 737                  * devices that are not active, i.e. where the link state
 738                  * has moved to DL_STATE_SUPPLIER_UNBIND.
 739                  */
 740                 if (link->status == DL_STATE_SUPPLIER_UNBIND &&
 741                     link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
 742                         device_link_drop_managed(link);
 743 
 744                 WRITE_ONCE(link->status, DL_STATE_DORMANT);
 745         }
 746 
 747         __device_links_no_driver(dev);
 748 
 749         device_links_write_unlock();
 750 }
 751 
 752 /**
 753  * device_links_busy - Check if there are any busy links to consumers.
 754  * @dev: Device to check.
 755  *
 756  * Check each consumer of the device and return 'true' if its link's status
 757  * is one of "consumer probe" or "active" (meaning that the given consumer
 758  * is probing right now or its driver is present).  Otherwise, change the
 759  * link state to "supplier unbind" to prevent the consumer from being probed
 760  * successfully going forward.
 761  *
 762  * Return 'false' if there are no probing or active consumers.
 763  *
 764  * Links without the DL_FLAG_MANAGED flag set are ignored.
 765  */
 766 bool device_links_busy(struct device *dev)
 767 {
 768         struct device_link *link;
 769         bool ret = false;
 770 
 771         device_links_write_lock();
 772 
 773         list_for_each_entry(link, &dev->links.consumers, s_node) {
 774                 if (!(link->flags & DL_FLAG_MANAGED))
 775                         continue;
 776 
 777                 if (link->status == DL_STATE_CONSUMER_PROBE
 778                     || link->status == DL_STATE_ACTIVE) {
 779                         ret = true;
 780                         break;
 781                 }
 782                 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
 783         }
 784 
 785         dev->links.status = DL_DEV_UNBINDING;
 786 
 787         device_links_write_unlock();
 788         return ret;
 789 }
 790 
 791 /**
 792  * device_links_unbind_consumers - Force unbind consumers of the given device.
 793  * @dev: Device to unbind the consumers of.
 794  *
 795  * Walk the list of links to consumers for @dev and if any of them is in the
 796  * "consumer probe" state, wait for all device probes in progress to complete
 797  * and start over.
 798  *
 799  * If that's not the case, change the status of the link to "supplier unbind"
 800  * and check if the link was in the "active" state.  If so, force the consumer
 801  * driver to unbind and start over (the consumer will not re-probe as we have
 802  * changed the state of the link already).
 803  *
 804  * Links without the DL_FLAG_MANAGED flag set are ignored.
 805  */
 806 void device_links_unbind_consumers(struct device *dev)
 807 {
 808         struct device_link *link;
 809 
 810  start:
 811         device_links_write_lock();
 812 
 813         list_for_each_entry(link, &dev->links.consumers, s_node) {
 814                 enum device_link_state status;
 815 
 816                 if (!(link->flags & DL_FLAG_MANAGED))
 817                         continue;
 818 
 819                 status = link->status;
 820                 if (status == DL_STATE_CONSUMER_PROBE) {
 821                         device_links_write_unlock();
 822 
 823                         wait_for_device_probe();
 824                         goto start;
 825                 }
 826                 WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
 827                 if (status == DL_STATE_ACTIVE) {
 828                         struct device *consumer = link->consumer;
 829 
 830                         get_device(consumer);
 831 
 832                         device_links_write_unlock();
 833 
 834                         device_release_driver_internal(consumer, NULL,
 835                                                        consumer->parent);
 836                         put_device(consumer);
 837                         goto start;
 838                 }
 839         }
 840 
 841         device_links_write_unlock();
 842 }
 843 
 844 /**
 845  * device_links_purge - Delete existing links to other devices.
 846  * @dev: Target device.
 847  */
 848 static void device_links_purge(struct device *dev)
 849 {
 850         struct device_link *link, *ln;
 851 
 852         /*
 853          * Delete all of the remaining links from this device to any other
 854          * devices (either consumers or suppliers).
 855          */
 856         device_links_write_lock();
 857 
 858         list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
 859                 WARN_ON(link->status == DL_STATE_ACTIVE);
 860                 __device_link_del(&link->kref);
 861         }
 862 
 863         list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
 864                 WARN_ON(link->status != DL_STATE_DORMANT &&
 865                         link->status != DL_STATE_NONE);
 866                 __device_link_del(&link->kref);
 867         }
 868 
 869         device_links_write_unlock();
 870 }
 871 
 872 /* Device links support end. */
 873 
 874 int (*platform_notify)(struct device *dev) = NULL;
 875 int (*platform_notify_remove)(struct device *dev) = NULL;
 876 static struct kobject *dev_kobj;
 877 struct kobject *sysfs_dev_char_kobj;
 878 struct kobject *sysfs_dev_block_kobj;
 879 
 880 static DEFINE_MUTEX(device_hotplug_lock);
 881 
 882 void lock_device_hotplug(void)
 883 {
 884         mutex_lock(&device_hotplug_lock);
 885 }
 886 
 887 void unlock_device_hotplug(void)
 888 {
 889         mutex_unlock(&device_hotplug_lock);
 890 }
 891 
 892 int lock_device_hotplug_sysfs(void)
 893 {
 894         if (mutex_trylock(&device_hotplug_lock))
 895                 return 0;
 896 
 897         /* Avoid busy looping (5 ms of sleep should do). */
 898         msleep(5);
 899         return restart_syscall();
 900 }
 901 
 902 #ifdef CONFIG_BLOCK
 903 static inline int device_is_not_partition(struct device *dev)
 904 {
 905         return !(dev->type == &part_type);
 906 }
 907 #else
 908 static inline int device_is_not_partition(struct device *dev)
 909 {
 910         return 1;
 911 }
 912 #endif
 913 
 914 static int
 915 device_platform_notify(struct device *dev, enum kobject_action action)
 916 {
 917         int ret;
 918 
 919         ret = acpi_platform_notify(dev, action);
 920         if (ret)
 921                 return ret;
 922 
 923         ret = software_node_notify(dev, action);
 924         if (ret)
 925                 return ret;
 926 
 927         if (platform_notify && action == KOBJ_ADD)
 928                 platform_notify(dev);
 929         else if (platform_notify_remove && action == KOBJ_REMOVE)
 930                 platform_notify_remove(dev);
 931         return 0;
 932 }
 933 
 934 /**
 935  * dev_driver_string - Return a device's driver name, if at all possible
 936  * @dev: struct device to get the name of
 937  *
 938  * Will return the device's driver's name if it is bound to a device.  If
 939  * the device is not bound to a driver, it will return the name of the bus
 940  * or class it is attached to.  If it has neither, an empty string is
 941  * returned.
 942  */
 943 const char *dev_driver_string(const struct device *dev)
 944 {
 945         struct device_driver *drv;
 946 
 947         /*
 948          * dev->driver can change to NULL underneath us because of unbinding,
 949          * so be careful about accessing it: READ_ONCE() gives a stable snapshot.
 950          */
 951         drv = READ_ONCE(dev->driver);
 952         return drv ? drv->name :
 953                         (dev->bus ? dev->bus->name :
 954                         (dev->class ? dev->class->name : ""));
 955 }
 956 EXPORT_SYMBOL(dev_driver_string);
 957 
 958 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
 959 
 960 static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
 961                              char *buf)
 962 {
 963         struct device_attribute *dev_attr = to_dev_attr(attr);
 964         struct device *dev = kobj_to_dev(kobj);
 965         ssize_t ret = -EIO;
 966 
 967         if (dev_attr->show)
 968                 ret = dev_attr->show(dev, dev_attr, buf);
 969         if (ret >= (ssize_t)PAGE_SIZE) {
 970                 printk("dev_attr_show: %pS returned bad count\n",
 971                                 dev_attr->show);
 972         }
 973         return ret;
 974 }
 975 
 976 static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
 977                               const char *buf, size_t count)
 978 {
 979         struct device_attribute *dev_attr = to_dev_attr(attr);
 980         struct device *dev = kobj_to_dev(kobj);
 981         ssize_t ret = -EIO;
 982 
 983         if (dev_attr->store)
 984                 ret = dev_attr->store(dev, dev_attr, buf, count);
 985         return ret;
 986 }
 987 
 988 static const struct sysfs_ops dev_sysfs_ops = {
 989         .show   = dev_attr_show,
 990         .store  = dev_attr_store,
 991 };
 992 
 993 #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
 994 
 995 ssize_t device_store_ulong(struct device *dev,
 996                            struct device_attribute *attr,
 997                            const char *buf, size_t size)
 998 {
 999         struct dev_ext_attribute *ea = to_ext_attr(attr);
1000         int ret;
1001         unsigned long new;
1002 
1003         ret = kstrtoul(buf, 0, &new);
1004         if (ret)
1005                 return ret;
1006         *(unsigned long *)(ea->var) = new;
1007         
1008         return size;
1009 }
1010 EXPORT_SYMBOL_GPL(device_store_ulong);
1011 
1012 ssize_t device_show_ulong(struct device *dev,
1013                           struct device_attribute *attr,
1014                           char *buf)
1015 {
1016         struct dev_ext_attribute *ea = to_ext_attr(attr);
1017         return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
1018 }
1019 EXPORT_SYMBOL_GPL(device_show_ulong);
1020 
1021 ssize_t device_store_int(struct device *dev,
1022                          struct device_attribute *attr,
1023                          const char *buf, size_t size)
1024 {
1025         struct dev_ext_attribute *ea = to_ext_attr(attr);
1026         int ret;
1027         long new;
1028 
1029         ret = kstrtol(buf, 0, &new);
1030         if (ret)
1031                 return ret;
1032 
1033         if (new > INT_MAX || new < INT_MIN)
1034                 return -EINVAL;
1035         *(int *)(ea->var) = new;
1036         
1037         return size;
1038 }
1039 EXPORT_SYMBOL_GPL(device_store_int);
1040 
1041 ssize_t device_show_int(struct device *dev,
1042                         struct device_attribute *attr,
1043                         char *buf)
1044 {
1045         struct dev_ext_attribute *ea = to_ext_attr(attr);
1046 
1047         return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
1048 }
1049 EXPORT_SYMBOL_GPL(device_show_int);
1050 
1051 ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
1052                           const char *buf, size_t size)
1053 {
1054         struct dev_ext_attribute *ea = to_ext_attr(attr);
1055 
1056         if (strtobool(buf, ea->var) < 0)
1057                 return -EINVAL;
1058 
1059         return size;
1060 }
1061 EXPORT_SYMBOL_GPL(device_store_bool);
1062 
1063 ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
1064                          char *buf)
1065 {
1066         struct dev_ext_attribute *ea = to_ext_attr(attr);
1067 
1068         return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
1069 }
1070 EXPORT_SYMBOL_GPL(device_show_bool);
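
/*
 * Illustration (not part of this file): the helpers above back the
 * DEVICE_ULONG_ATTR()/DEVICE_INT_ATTR()/DEVICE_BOOL_ATTR() macros from
 * <linux/device.h>, e.g. with a hypothetical variable foo_threshold:
 *
 *	static unsigned long foo_threshold;
 *	static DEVICE_ULONG_ATTR(threshold, 0644, foo_threshold);
 *
 * after which &dev_attr_threshold.attr can be passed to device_create_file().
 */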
1071 
1072 /*
1073  * device_release - free device structure.
1074  * @kobj: device's kobject.
1075  *
1076  * This is called once the reference count for the object
1077  * reaches 0.  We forward the call to the device's release
1078  * method, which should handle actually freeing the structure.
1079  */
1080 static void device_release(struct kobject *kobj)
1081 {
1082         struct device *dev = kobj_to_dev(kobj);
1083         struct device_private *p = dev->p;
1084 
1085         /*
1086          * Some platform devices are driven without driver attached
1087          * and managed resources may have been acquired.  Make sure
1088          * all resources are released.
1089          *
1090          * Drivers still can add resources into device after device
1091          * is deleted but alive, so release devres here to avoid
1092          * possible memory leak.
1093          */
1094         devres_release_all(dev);
1095 
1096         if (dev->release)
1097                 dev->release(dev);
1098         else if (dev->type && dev->type->release)
1099                 dev->type->release(dev);
1100         else if (dev->class && dev->class->dev_release)
1101                 dev->class->dev_release(dev);
1102         else
1103                 WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
1104                         dev_name(dev));
1105         kfree(p);
1106 }
1107 
1108 static const void *device_namespace(struct kobject *kobj)
1109 {
1110         struct device *dev = kobj_to_dev(kobj);
1111         const void *ns = NULL;
1112 
1113         if (dev->class && dev->class->ns_type)
1114                 ns = dev->class->namespace(dev);
1115 
1116         return ns;
1117 }
1118 
1119 static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
1120 {
1121         struct device *dev = kobj_to_dev(kobj);
1122 
1123         if (dev->class && dev->class->get_ownership)
1124                 dev->class->get_ownership(dev, uid, gid);
1125 }
1126 
1127 static struct kobj_type device_ktype = {
1128         .release        = device_release,
1129         .sysfs_ops      = &dev_sysfs_ops,
1130         .namespace      = device_namespace,
1131         .get_ownership  = device_get_ownership,
1132 };
1133 
1134 
1135 static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
1136 {
1137         struct kobj_type *ktype = get_ktype(kobj);
1138 
1139         if (ktype == &device_ktype) {
1140                 struct device *dev = kobj_to_dev(kobj);
1141                 if (dev->bus)
1142                         return 1;
1143                 if (dev->class)
1144                         return 1;
1145         }
1146         return 0;
1147 }
1148 
1149 static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
1150 {
1151         struct device *dev = kobj_to_dev(kobj);
1152 
1153         if (dev->bus)
1154                 return dev->bus->name;
1155         if (dev->class)
1156                 return dev->class->name;
1157         return NULL;
1158 }
1159 
1160 static int dev_uevent(struct kset *kset, struct kobject *kobj,
1161                       struct kobj_uevent_env *env)
1162 {
1163         struct device *dev = kobj_to_dev(kobj);
1164         int retval = 0;
1165 
1166         
1167         if (MAJOR(dev->devt)) {
1168                 const char *tmp;
1169                 const char *name;
1170                 umode_t mode = 0;
1171                 kuid_t uid = GLOBAL_ROOT_UID;
1172                 kgid_t gid = GLOBAL_ROOT_GID;
1173 
1174                 add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
1175                 add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
1176                 name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
1177                 if (name) {
1178                         add_uevent_var(env, "DEVNAME=%s", name);
1179                         if (mode)
1180                                 add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
1181                         if (!uid_eq(uid, GLOBAL_ROOT_UID))
1182                                 add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
1183                         if (!gid_eq(gid, GLOBAL_ROOT_GID))
1184                                 add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
1185                         kfree(tmp);
1186                 }
1187         }
1188 
1189         if (dev->type && dev->type->name)
1190                 add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
1191 
1192         if (dev->driver)
1193                 add_uevent_var(env, "DRIVER=%s", dev->driver->name);
1194 
1195         
1196         of_device_uevent(dev, env);
1197 
1198         
1199         if (dev->bus && dev->bus->uevent) {
1200                 retval = dev->bus->uevent(dev, env);
1201                 if (retval)
1202                         pr_debug("device: '%s': %s: bus uevent() returned %d\n",
1203                                  dev_name(dev), __func__, retval);
1204         }
1205 
1206         
1207         if (dev->class && dev->class->dev_uevent) {
1208                 retval = dev->class->dev_uevent(dev, env);
1209                 if (retval)
1210                         pr_debug("device: '%s': %s: class uevent() "
1211                                  "returned %d\n", dev_name(dev),
1212                                  __func__, retval);
1213         }
1214 
1215         
1216         if (dev->type && dev->type->uevent) {
1217                 retval = dev->type->uevent(dev, env);
1218                 if (retval)
1219                         pr_debug("device: '%s': %s: dev_type uevent() "
1220                                  "returned %d\n", dev_name(dev),
1221                                  __func__, retval);
1222         }
1223 
1224         return retval;
1225 }
1226 
1227 static const struct kset_uevent_ops device_uevent_ops = {
1228         .filter =       dev_uevent_filter,
1229         .name =         dev_uevent_name,
1230         .uevent =       dev_uevent,
1231 };
1232 
1233 static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
1234                            char *buf)
1235 {
1236         struct kobject *top_kobj;
1237         struct kset *kset;
1238         struct kobj_uevent_env *env = NULL;
1239         int i;
1240         size_t count = 0;
1241         int retval;
1242 
1243         
1244         top_kobj = &dev->kobj;
1245         while (!top_kobj->kset && top_kobj->parent)
1246                 top_kobj = top_kobj->parent;
1247         if (!top_kobj->kset)
1248                 goto out;
1249 
1250         kset = top_kobj->kset;
1251         if (!kset->uevent_ops || !kset->uevent_ops->uevent)
1252                 goto out;
1253 
1254         
1255         if (kset->uevent_ops && kset->uevent_ops->filter)
1256                 if (!kset->uevent_ops->filter(kset, &dev->kobj))
1257                         goto out;
1258 
1259         env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
1260         if (!env)
1261                 return -ENOMEM;
1262 
1263         
1264         retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
1265         if (retval)
1266                 goto out;
1267 
1268         
1269         for (i = 0; i < env->envp_idx; i++)
1270                 count += sprintf(&buf[count], "%s\n", env->envp[i]);
1271 out:
1272         kfree(env);
1273         return count;
1274 }
1275 
1276 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
1277                             const char *buf, size_t count)
1278 {
1279         int rc;
1280 
1281         rc = kobject_synth_uevent(&dev->kobj, buf, count);
1282 
1283         if (rc) {
1284                 dev_err(dev, "uevent: failed to send synthetic uevent\n");
1285                 return rc;
1286         }
1287 
1288         return count;
1289 }
1290 static DEVICE_ATTR_RW(uevent);
1291 
1292 static ssize_t online_show(struct device *dev, struct device_attribute *attr,
1293                            char *buf)
1294 {
1295         bool val;
1296 
1297         device_lock(dev);
1298         val = !dev->offline;
1299         device_unlock(dev);
1300         return sprintf(buf, "%u\n", val);
1301 }
1302 
1303 static ssize_t online_store(struct device *dev, struct device_attribute *attr,
1304                             const char *buf, size_t count)
1305 {
1306         bool val;
1307         int ret;
1308 
1309         ret = strtobool(buf, &val);
1310         if (ret < 0)
1311                 return ret;
1312 
1313         ret = lock_device_hotplug_sysfs();
1314         if (ret)
1315                 return ret;
1316 
1317         ret = val ? device_online(dev) : device_offline(dev);
1318         unlock_device_hotplug();
1319         return ret < 0 ? ret : count;
1320 }
1321 static DEVICE_ATTR_RW(online);
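
/*
 * The "online" attribute is only created for devices whose bus or class
 * implements offline/online (see device_add_attrs() below); writes to it
 * call device_online()/device_offline() under the device hotplug lock.
 */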
1322 
1323 int device_add_groups(struct device *dev, const struct attribute_group **groups)
1324 {
1325         return sysfs_create_groups(&dev->kobj, groups);
1326 }
1327 EXPORT_SYMBOL_GPL(device_add_groups);
1328 
1329 void device_remove_groups(struct device *dev,
1330                           const struct attribute_group **groups)
1331 {
1332         sysfs_remove_groups(&dev->kobj, groups);
1333 }
1334 EXPORT_SYMBOL_GPL(device_remove_groups);
1335 
1336 union device_attr_group_devres {
1337         const struct attribute_group *group;
1338         const struct attribute_group **groups;
1339 };
1340 
1341 static int devm_attr_group_match(struct device *dev, void *res, void *data)
1342 {
1343         return ((union device_attr_group_devres *)res)->group == data;
1344 }
1345 
1346 static void devm_attr_group_remove(struct device *dev, void *res)
1347 {
1348         union device_attr_group_devres *devres = res;
1349         const struct attribute_group *group = devres->group;
1350 
1351         dev_dbg(dev, "%s: removing group %p\n", __func__, group);
1352         sysfs_remove_group(&dev->kobj, group);
1353 }
1354 
1355 static void devm_attr_groups_remove(struct device *dev, void *res)
1356 {
1357         union device_attr_group_devres *devres = res;
1358         const struct attribute_group **groups = devres->groups;
1359 
1360         dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
1361         sysfs_remove_groups(&dev->kobj, groups);
1362 }
1363 
1364 /**
1365  * devm_device_add_group - given a device, create a managed attribute group
1366  * @dev:	The device to create the group for
1367  * @grp:	The attribute group to create
1368  *
1369  * This function creates a group for the first time.  It will explicitly
1370  * warn and error if any of the attribute files being created already exist.
1371  *
1372  * Returns 0 on success or error code on failure.
1373  */
1374 int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
1375 {
1376         union device_attr_group_devres *devres;
1377         int error;
1378 
1379         devres = devres_alloc(devm_attr_group_remove,
1380                               sizeof(*devres), GFP_KERNEL);
1381         if (!devres)
1382                 return -ENOMEM;
1383 
1384         error = sysfs_create_group(&dev->kobj, grp);
1385         if (error) {
1386                 devres_free(devres);
1387                 return error;
1388         }
1389 
1390         devres->group = grp;
1391         devres_add(dev, devres);
1392         return 0;
1393 }
1394 EXPORT_SYMBOL_GPL(devm_device_add_group);
1395 
1396 
1397 
1398 
1399 
1400 
1401 
1402 
1403 
1404 void devm_device_remove_group(struct device *dev,
1405                               const struct attribute_group *grp)
1406 {
1407         WARN_ON(devres_release(dev, devm_attr_group_remove,
1408                                devm_attr_group_match,
1409                                 (void *)grp));
1410 }
1411 EXPORT_SYMBOL_GPL(devm_device_remove_group);
1412 
1413 
1414 
1415 
1416 
1417 
1418 
1419 
1420 
1421 
1422 
1423 
1424 
1425 
1426 int devm_device_add_groups(struct device *dev,
1427                            const struct attribute_group **groups)
1428 {
1429         union device_attr_group_devres *devres;
1430         int error;
1431 
1432         devres = devres_alloc(devm_attr_groups_remove,
1433                               sizeof(*devres), GFP_KERNEL);
1434         if (!devres)
1435                 return -ENOMEM;
1436 
1437         error = sysfs_create_groups(&dev->kobj, groups);
1438         if (error) {
1439                 devres_free(devres);
1440                 return error;
1441         }
1442 
1443         devres->groups = groups;
1444         devres_add(dev, devres);
1445         return 0;
1446 }
1447 EXPORT_SYMBOL_GPL(devm_device_add_groups);
1448 
1449 
1450 
1451 
1452 
1453 
1454 
1455 
1456 
1457 void devm_device_remove_groups(struct device *dev,
1458                                const struct attribute_group **groups)
1459 {
1460         WARN_ON(devres_release(dev, devm_attr_groups_remove,
1461                                devm_attr_group_match,
1462                                 (void *)groups));
1463 }
1464 EXPORT_SYMBOL_GPL(devm_device_remove_groups);
1465 
1466 static int device_add_attrs(struct device *dev)
1467 {
1468         struct class *class = dev->class;
1469         const struct device_type *type = dev->type;
1470         int error;
1471 
1472         if (class) {
1473                 error = device_add_groups(dev, class->dev_groups);
1474                 if (error)
1475                         return error;
1476         }
1477 
1478         if (type) {
1479                 error = device_add_groups(dev, type->groups);
1480                 if (error)
1481                         goto err_remove_class_groups;
1482         }
1483 
1484         error = device_add_groups(dev, dev->groups);
1485         if (error)
1486                 goto err_remove_type_groups;
1487 
1488         if (device_supports_offline(dev) && !dev->offline_disabled) {
1489                 error = device_create_file(dev, &dev_attr_online);
1490                 if (error)
1491                         goto err_remove_dev_groups;
1492         }
1493 
1494         return 0;
1495 
1496  err_remove_dev_groups:
1497         device_remove_groups(dev, dev->groups);
1498  err_remove_type_groups:
1499         if (type)
1500                 device_remove_groups(dev, type->groups);
1501  err_remove_class_groups:
1502         if (class)
1503                 device_remove_groups(dev, class->dev_groups);
1504 
1505         return error;
1506 }
1507 
1508 static void device_remove_attrs(struct device *dev)
1509 {
1510         struct class *class = dev->class;
1511         const struct device_type *type = dev->type;
1512 
1513         device_remove_file(dev, &dev_attr_online);
1514         device_remove_groups(dev, dev->groups);
1515 
1516         if (type)
1517                 device_remove_groups(dev, type->groups);
1518 
1519         if (class)
1520                 device_remove_groups(dev, class->dev_groups);
1521 }
1522 
1523 static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
1524                         char *buf)
1525 {
1526         return print_dev_t(buf, dev->devt);
1527 }
1528 static DEVICE_ATTR_RO(dev);
1529 
1530 
1531 struct kset *devices_kset;
1532 
1533 
1534 
1535 
1536 
1537 
1538 static void devices_kset_move_before(struct device *deva, struct device *devb)
1539 {
1540         if (!devices_kset)
1541                 return;
1542         pr_debug("devices_kset: Moving %s before %s\n",
1543                  dev_name(deva), dev_name(devb));
1544         spin_lock(&devices_kset->list_lock);
1545         list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
1546         spin_unlock(&devices_kset->list_lock);
1547 }
1548 
1549 
1550 
1551 
1552 
1553 
1554 static void devices_kset_move_after(struct device *deva, struct device *devb)
1555 {
1556         if (!devices_kset)
1557                 return;
1558         pr_debug("devices_kset: Moving %s after %s\n",
1559                  dev_name(deva), dev_name(devb));
1560         spin_lock(&devices_kset->list_lock);
1561         list_move(&deva->kobj.entry, &devb->kobj.entry);
1562         spin_unlock(&devices_kset->list_lock);
1563 }
1564 
1565 
1566 
1567 
1568 
1569 void devices_kset_move_last(struct device *dev)
1570 {
1571         if (!devices_kset)
1572                 return;
1573         pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
1574         spin_lock(&devices_kset->list_lock);
1575         list_move_tail(&dev->kobj.entry, &devices_kset->list);
1576         spin_unlock(&devices_kset->list_lock);
1577 }
1578 
1579 /**
1580  * device_create_file - create sysfs attribute file for device.
1581  * @dev: device.
1582  * @attr: device attribute descriptor.
1583  */
1584 int device_create_file(struct device *dev,
1585                        const struct device_attribute *attr)
1586 {
1587         int error = 0;
1588 
1589         if (dev) {
1590                 WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
1591                         "Attribute %s: write permission without 'store'\n",
1592                         attr->attr.name);
1593                 WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
1594                         "Attribute %s: read permission without 'show'\n",
1595                         attr->attr.name);
1596                 error = sysfs_create_file(&dev->kobj, &attr->attr);
1597         }
1598 
1599         return error;
1600 }
1601 EXPORT_SYMBOL_GPL(device_create_file);
1602 
1603 
1604 
1605 
1606 
1607 
1608 void device_remove_file(struct device *dev,
1609                         const struct device_attribute *attr)
1610 {
1611         if (dev)
1612                 sysfs_remove_file(&dev->kobj, &attr->attr);
1613 }
1614 EXPORT_SYMBOL_GPL(device_remove_file);
1615 
1616 
1617 
1618 
1619 
1620 
1621 
1622 
1623 bool device_remove_file_self(struct device *dev,
1624                              const struct device_attribute *attr)
1625 {
1626         if (dev)
1627                 return sysfs_remove_file_self(&dev->kobj, &attr->attr);
1628         else
1629                 return false;
1630 }
1631 EXPORT_SYMBOL_GPL(device_remove_file_self);
1632 
1633 
1634 
1635 
1636 
1637 
1638 int device_create_bin_file(struct device *dev,
1639                            const struct bin_attribute *attr)
1640 {
1641         int error = -EINVAL;
1642         if (dev)
1643                 error = sysfs_create_bin_file(&dev->kobj, attr);
1644         return error;
1645 }
1646 EXPORT_SYMBOL_GPL(device_create_bin_file);
1647 
1648 
1649 
1650 
1651 
1652 
1653 void device_remove_bin_file(struct device *dev,
1654                             const struct bin_attribute *attr)
1655 {
1656         if (dev)
1657                 sysfs_remove_bin_file(&dev->kobj, attr);
1658 }
1659 EXPORT_SYMBOL_GPL(device_remove_bin_file);
1660 
1661 static void klist_children_get(struct klist_node *n)
1662 {
1663         struct device_private *p = to_device_private_parent(n);
1664         struct device *dev = p->device;
1665 
1666         get_device(dev);
1667 }
1668 
1669 static void klist_children_put(struct klist_node *n)
1670 {
1671         struct device_private *p = to_device_private_parent(n);
1672         struct device *dev = p->device;
1673 
1674         put_device(dev);
1675 }
1676 
1677 
1678 
1679 
1680 
1681 
1682 
1683 
1684 
1685 
1686 
1687 
1688 
1689 
1690 
1691 
1692 
1693 
1694 
1695 
1696 
1697 void device_initialize(struct device *dev)
1698 {
1699         dev->kobj.kset = devices_kset;
1700         kobject_init(&dev->kobj, &device_ktype);
1701         INIT_LIST_HEAD(&dev->dma_pools);
1702         mutex_init(&dev->mutex);
1703 #ifdef CONFIG_PROVE_LOCKING
1704         mutex_init(&dev->lockdep_mutex);
1705 #endif
1706         lockdep_set_novalidate_class(&dev->mutex);
1707         spin_lock_init(&dev->devres_lock);
1708         INIT_LIST_HEAD(&dev->devres_head);
1709         device_pm_init(dev);
1710         set_dev_node(dev, -1);
1711 #ifdef CONFIG_GENERIC_MSI_IRQ
1712         INIT_LIST_HEAD(&dev->msi_list);
1713 #endif
1714         INIT_LIST_HEAD(&dev->links.consumers);
1715         INIT_LIST_HEAD(&dev->links.suppliers);
1716         dev->links.status = DL_DEV_NO_DRIVER;
1717 }
1718 EXPORT_SYMBOL_GPL(device_initialize);
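
/*
 * Illustration (not part of this file): the usual two-step registration
 * pattern built on device_initialize() and device_add(); foo, foo_release
 * and id are hypothetical names.
 *
 *	device_initialize(&foo->dev);
 *	foo->dev.parent = parent;
 *	foo->dev.release = foo_release;
 *	dev_set_name(&foo->dev, "foo%d", id);
 *	if (device_add(&foo->dev))
 *		put_device(&foo->dev);	 (the release() callback then frees foo)
 */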
1719 
1720 struct kobject *virtual_device_parent(struct device *dev)
1721 {
1722         static struct kobject *virtual_dir = NULL;
1723 
1724         if (!virtual_dir)
1725                 virtual_dir = kobject_create_and_add("virtual",
1726                                                      &devices_kset->kobj);
1727 
1728         return virtual_dir;
1729 }
1730 
1731 struct class_dir {
1732         struct kobject kobj;
1733         struct class *class;
1734 };
1735 
1736 #define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
1737 
1738 static void class_dir_release(struct kobject *kobj)
1739 {
1740         struct class_dir *dir = to_class_dir(kobj);
1741         kfree(dir);
1742 }
1743 
1744 static const
1745 struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
1746 {
1747         struct class_dir *dir = to_class_dir(kobj);
1748         return dir->class->ns_type;
1749 }
1750 
1751 static struct kobj_type class_dir_ktype = {
1752         .release        = class_dir_release,
1753         .sysfs_ops      = &kobj_sysfs_ops,
1754         .child_ns_type  = class_dir_child_ns_type
1755 };
1756 
1757 static struct kobject *
1758 class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
1759 {
1760         struct class_dir *dir;
1761         int retval;
1762 
1763         dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1764         if (!dir)
1765                 return ERR_PTR(-ENOMEM);
1766 
1767         dir->class = class;
1768         kobject_init(&dir->kobj, &class_dir_ktype);
1769 
1770         dir->kobj.kset = &class->p->glue_dirs;
1771 
1772         retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
1773         if (retval < 0) {
1774                 kobject_put(&dir->kobj);
1775                 return ERR_PTR(retval);
1776         }
1777         return &dir->kobj;
1778 }
1779 
1780 static DEFINE_MUTEX(gdp_mutex);
1781 
1782 static struct kobject *get_device_parent(struct device *dev,
1783                                          struct device *parent)
1784 {
1785         if (dev->class) {
1786                 struct kobject *kobj = NULL;
1787                 struct kobject *parent_kobj;
1788                 struct kobject *k;
1789 
1790 #ifdef CONFIG_BLOCK
1791                 
1792                 if (sysfs_deprecated && dev->class == &block_class) {
1793                         if (parent && parent->class == &block_class)
1794                                 return &parent->kobj;
1795                         return &block_class.p->subsys.kobj;
1796                 }
1797 #endif
1798 
1799                 
1800 
1801 
1802 
1803 
1804                 if (parent == NULL)
1805                         parent_kobj = virtual_device_parent(dev);
1806                 else if (parent->class && !dev->class->ns_type)
1807                         return &parent->kobj;
1808                 else
1809                         parent_kobj = &parent->kobj;
1810 
1811                 mutex_lock(&gdp_mutex);
1812 
1813                 
1814                 spin_lock(&dev->class->p->glue_dirs.list_lock);
1815                 list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
1816                         if (k->parent == parent_kobj) {
1817                                 kobj = kobject_get(k);
1818                                 break;
1819                         }
1820                 spin_unlock(&dev->class->p->glue_dirs.list_lock);
1821                 if (kobj) {
1822                         mutex_unlock(&gdp_mutex);
1823                         return kobj;
1824                 }
1825 
1826                 
1827                 k = class_dir_create_and_add(dev->class, parent_kobj);
1828                 
1829                 mutex_unlock(&gdp_mutex);
1830                 return k;
1831         }
1832 
1833         
1834         if (!parent && dev->bus && dev->bus->dev_root)
1835                 return &dev->bus->dev_root->kobj;
1836 
1837         if (parent)
1838                 return &parent->kobj;
1839         return NULL;
1840 }
1841 
1842 static inline bool live_in_glue_dir(struct kobject *kobj,
1843                                     struct device *dev)
1844 {
1845         if (!kobj || !dev->class ||
1846             kobj->kset != &dev->class->p->glue_dirs)
1847                 return false;
1848         return true;
1849 }
1850 
1851 static inline struct kobject *get_glue_dir(struct device *dev)
1852 {
1853         return dev->kobj.parent;
1854 }
1855 
1856 
1857 
1858 
1859 
1860 
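/*
 * The check below must run under gdp_mutex: a concurrent device_add() may
 * already hold a reference on the glue dir (kobject_get() in
 * get_device_parent()) without having added its child kobject yet, so the
 * glue dir is only deleted when it has no children and we hold its last
 * reference.
 */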
1861 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
1862 {
1863         unsigned int ref;
1864 
1865         
1866         if (!live_in_glue_dir(glue_dir, dev))
1867                 return;
1868 
1869         mutex_lock(&gdp_mutex);
1870         
1871 
1872 
1873 
1874 
1875 
1876 
1877 
1878 
1879 
1880 
1881 
1882 
1883 
1884 
1885 
1886 
1887 
1888 
1889 
1890 
1891 
1892 
1893 
1894 
1895 
1896 
1897 
1898 
1899 
1900 
1901 
1902 
1903 
1904 
1905 
1906 
1907 
1908 
1909 
1910 
1911 
1912 
1913 
1914 
1915 
1916 
1917 
1918         ref = kref_read(&glue_dir->kref);
1919         if (!kobject_has_children(glue_dir) && !--ref)
1920                 kobject_del(glue_dir);
1921         kobject_put(glue_dir);
1922         mutex_unlock(&gdp_mutex);
1923 }
1924 
1925 static int device_add_class_symlinks(struct device *dev)
1926 {
1927         struct device_node *of_node = dev_of_node(dev);
1928         int error;
1929 
1930         if (of_node) {
1931                 error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
1932                 if (error)
1933                         dev_warn(dev, "Error %d creating of_node link\n", error);
1934                 /* An error here doesn't warrant bringing down the device */
1935         }
1936 
1937         if (!dev->class)
1938                 return 0;
1939 
1940         error = sysfs_create_link(&dev->kobj,
1941                                   &dev->class->p->subsys.kobj,
1942                                   "subsystem");
1943         if (error)
1944                 goto out_devnode;
1945 
1946         if (dev->parent && device_is_not_partition(dev)) {
1947                 error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
1948                                           "device");
1949                 if (error)
1950                         goto out_subsys;
1951         }
1952 
1953 #ifdef CONFIG_BLOCK
1954         /* /sys/block has directories and does not need symlinks */
1955         if (sysfs_deprecated && dev->class == &block_class)
1956                 return 0;
1957 #endif
1958 
1959         /* link in the class directory pointing to the device */
1960         error = sysfs_create_link(&dev->class->p->subsys.kobj,
1961                                   &dev->kobj, dev_name(dev));
1962         if (error)
1963                 goto out_device;
1964 
1965         return 0;
1966 
1967 out_device:
1968         sysfs_remove_link(&dev->kobj, "device");
1969 
1970 out_subsys:
1971         sysfs_remove_link(&dev->kobj, "subsystem");
1972 out_devnode:
1973         sysfs_remove_link(&dev->kobj, "of_node");
1974         return error;
1975 }
1976 
1977 static void device_remove_class_symlinks(struct device *dev)
1978 {
1979         if (dev_of_node(dev))
1980                 sysfs_remove_link(&dev->kobj, "of_node");
1981 
1982         if (!dev->class)
1983                 return;
1984 
1985         if (dev->parent && device_is_not_partition(dev))
1986                 sysfs_remove_link(&dev->kobj, "device");
1987         sysfs_remove_link(&dev->kobj, "subsystem");
1988 #ifdef CONFIG_BLOCK
1989         if (sysfs_deprecated && dev->class == &block_class)
1990                 return;
1991 #endif
1992         sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
1993 }
1994 
1995 /**
1996  * dev_set_name - set a device name
1997  * @dev: device
1998  * @fmt: format string for the device's name
1999  */
2000 int dev_set_name(struct device *dev, const char *fmt, ...)
2001 {
2002         va_list vargs;
2003         int err;
2004 
2005         va_start(vargs, fmt);
2006         err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
2007         va_end(vargs);
2008         return err;
2009 }
2010 EXPORT_SYMBOL_GPL(dev_set_name);
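
/*
 * Editorial usage sketch (not part of the original file): a common pattern
 * is to name a dynamically allocated device with dev_set_name() before
 * device_add().  The "foo" prefix and the index are hypothetical.
 */
static __maybe_unused int example_name_device(struct device *dev, int index)
{
	/* dev_set_name() accepts a printf-style format string */
	return dev_set_name(dev, "foo%d", index);
}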
2011 
2012 /**
2013  * device_to_dev_kobj - select a /sys/dev/ directory for the device
2014  * @dev: device
2015  *
2016  * By default we select char/ for new entries.  Setting class->dev_kobj
2017  * to NULL prevents an entry from being created.  class->dev_kobj must
2018  * be set (or cleared) before any devices are registered to the class
2019  * otherwise device_create_sys_dev_entry() and
2020  * device_remove_sys_dev_entry() will disagree about the presence of
2021  * the link.
2022  */
2023 static struct kobject *device_to_dev_kobj(struct device *dev)
2024 {
2025         struct kobject *kobj;
2026 
2027         if (dev->class)
2028                 kobj = dev->class->dev_kobj;
2029         else
2030                 kobj = sysfs_dev_char_kobj;
2031 
2032         return kobj;
2033 }
2034 
2035 static int device_create_sys_dev_entry(struct device *dev)
2036 {
2037         struct kobject *kobj = device_to_dev_kobj(dev);
2038         int error = 0;
2039         char devt_str[15];
2040 
2041         if (kobj) {
2042                 format_dev_t(devt_str, dev->devt);
2043                 error = sysfs_create_link(kobj, &dev->kobj, devt_str);
2044         }
2045 
2046         return error;
2047 }
2048 
2049 static void device_remove_sys_dev_entry(struct device *dev)
2050 {
2051         struct kobject *kobj = device_to_dev_kobj(dev);
2052         char devt_str[15];
2053 
2054         if (kobj) {
2055                 format_dev_t(devt_str, dev->devt);
2056                 sysfs_remove_link(kobj, devt_str);
2057         }
2058 }
2059 
2060 static int device_private_init(struct device *dev)
2061 {
2062         dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
2063         if (!dev->p)
2064                 return -ENOMEM;
2065         dev->p->device = dev;
2066         klist_init(&dev->p->klist_children, klist_children_get,
2067                    klist_children_put);
2068         INIT_LIST_HEAD(&dev->p->deferred_probe);
2069         return 0;
2070 }
2071 
2072 /**
2073  * device_add - add device to device hierarchy.
2074  * @dev: device.
2075  *
2076  * This is part 2 of device_register(), though may be called
2077  * separately _iff_ device_initialize() has been called separately.
2078  *
2079  * This adds @dev to the kobject hierarchy via kobject_add(), adds it
2080  * to the global and sibling lists for the device, then
2081  * adds it to the other relevant subsystems of the driver model.
2082  *
2083  * Do not call this routine or device_register() more than once for
2084  * any device structure.  The driver model core is not designed to work
2085  * with devices that get unregistered and then spring back to life.
2086  * (Among other things, it's very hard to guarantee that all references
2087  * to the previous incarnation of @dev have been dropped.)  Allocate
2088  * and register a fresh new struct device instead.
2089  *
2090  * NOTE: _Never_ directly free @dev after calling this function, even
2091  * if it returned an error! Always use put_device() to give up your
2092  * reference instead.
2093  *
2094  * Rule of thumb is: if device_add() succeeds, you should call
2095  * device_del() when you want to get rid of it. If device_add() has
2096  * *not* succeeded, use *only* put_device() to drop the reference
2097  * count.
2098  */
2099 int device_add(struct device *dev)
2100 {
2101         struct device *parent;
2102         struct kobject *kobj;
2103         struct class_interface *class_intf;
2104         int error = -EINVAL;
2105         struct kobject *glue_dir = NULL;
2106 
2107         dev = get_device(dev);
2108         if (!dev)
2109                 goto done;
2110 
2111         if (!dev->p) {
2112                 error = device_private_init(dev);
2113                 if (error)
2114                         goto done;
2115         }
2116 
2117         /*
2118          * for statically allocated devices, which should all be converted
2119          * some day, we need to initialize the name. We prevent reading back
2120          * the name, and force the use of dev_name()
2121          */
2122         if (dev->init_name) {
2123                 dev_set_name(dev, "%s", dev->init_name);
2124                 dev->init_name = NULL;
2125         }
2126 
2127         /* subsystems can specify simple device enumeration */
2128         if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
2129                 dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
2130 
2131         if (!dev_name(dev)) {
2132                 error = -EINVAL;
2133                 goto name_error;
2134         }
2135 
2136         pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
2137 
2138         parent = get_device(dev->parent);
2139         kobj = get_device_parent(dev, parent);
2140         if (IS_ERR(kobj)) {
2141                 error = PTR_ERR(kobj);
2142                 goto parent_error;
2143         }
2144         if (kobj)
2145                 dev->kobj.parent = kobj;
2146 
2147         /* use parent numa_node */
2148         if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
2149                 set_dev_node(dev, dev_to_node(parent));
2150 
2151         /* first, register with generic layer. */
2152         /* we require the name to be set before, and pass NULL */
2153         error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
2154         if (error) {
2155                 glue_dir = get_glue_dir(dev);
2156                 goto Error;
2157         }
2158 
2159         /* notify platform of device entry */
2160         error = device_platform_notify(dev, KOBJ_ADD);
2161         if (error)
2162                 goto platform_error;
2163 
2164         error = device_create_file(dev, &dev_attr_uevent);
2165         if (error)
2166                 goto attrError;
2167 
2168         error = device_add_class_symlinks(dev);
2169         if (error)
2170                 goto SymlinkError;
2171         error = device_add_attrs(dev);
2172         if (error)
2173                 goto AttrsError;
2174         error = bus_add_device(dev);
2175         if (error)
2176                 goto BusError;
2177         error = dpm_sysfs_add(dev);
2178         if (error)
2179                 goto DPMError;
2180         device_pm_add(dev);
2181 
2182         if (MAJOR(dev->devt)) {
2183                 error = device_create_file(dev, &dev_attr_dev);
2184                 if (error)
2185                         goto DevAttrError;
2186 
2187                 error = device_create_sys_dev_entry(dev);
2188                 if (error)
2189                         goto SysEntryError;
2190 
2191                 devtmpfs_create_node(dev);
2192         }
2193 
2194         /* Notify clients of device addition.  This call must come
2195          * after dpm_sysfs_add() and before kobject_uevent().
2196          */
2197         if (dev->bus)
2198                 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
2199                                              BUS_NOTIFY_ADD_DEVICE, dev);
2200 
2201         kobject_uevent(&dev->kobj, KOBJ_ADD);
2202         bus_probe_device(dev);
2203         if (parent)
2204                 klist_add_tail(&dev->p->knode_parent,
2205                                &parent->p->klist_children);
2206 
2207         if (dev->class) {
2208                 mutex_lock(&dev->class->p->mutex);
2209                 /* tie the class to the device */
2210                 klist_add_tail(&dev->p->knode_class,
2211                                &dev->class->p->klist_devices);
2212 
2213                 /* notify any interfaces that the device is here */
2214                 list_for_each_entry(class_intf,
2215                                     &dev->class->p->interfaces, node)
2216                         if (class_intf->add_dev)
2217                                 class_intf->add_dev(dev, class_intf);
2218                 mutex_unlock(&dev->class->p->mutex);
2219         }
2220 done:
2221         put_device(dev);
2222         return error;
2223  SysEntryError:
2224         if (MAJOR(dev->devt))
2225                 device_remove_file(dev, &dev_attr_dev);
2226  DevAttrError:
2227         device_pm_remove(dev);
2228         dpm_sysfs_remove(dev);
2229  DPMError:
2230         bus_remove_device(dev);
2231  BusError:
2232         device_remove_attrs(dev);
2233  AttrsError:
2234         device_remove_class_symlinks(dev);
2235  SymlinkError:
2236         device_remove_file(dev, &dev_attr_uevent);
2237  attrError:
2238         device_platform_notify(dev, KOBJ_REMOVE);
2239 platform_error:
2240         kobject_uevent(&dev->kobj, KOBJ_REMOVE);
2241         glue_dir = get_glue_dir(dev);
2242         kobject_del(&dev->kobj);
2243  Error:
2244         cleanup_glue_dir(dev, glue_dir);
2245 parent_error:
2246         put_device(parent);
2247 name_error:
2248         kfree(dev->p);
2249         dev->p = NULL;
2250         goto done;
2251 }
2252 EXPORT_SYMBOL_GPL(device_add);
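
/*
 * Editorial usage sketch (not part of the original file): the two-step
 * device_initialize()/device_add() pattern, with the error handling the
 * kerneldoc above asks for -- on failure the reference must be dropped
 * with put_device(), never kfree().  All "foo" names are hypothetical.
 */
struct foo_device {
	struct device dev;
	int id;
};

static void foo_device_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_device, dev));
}

static __maybe_unused struct foo_device *foo_device_add(struct device *parent,
							 int id)
{
	struct foo_device *foo;
	int err;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	foo->id = id;
	foo->dev.parent = parent;
	foo->dev.release = foo_device_release;
	device_initialize(&foo->dev);

	err = dev_set_name(&foo->dev, "foo%d", id);
	if (err)
		goto err_put;

	err = device_add(&foo->dev);
	if (err)
		goto err_put;

	return foo;

err_put:
	put_device(&foo->dev);	/* frees foo via foo_device_release() */
	return ERR_PTR(err);
}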
2253 
2254 /**
2255  * device_register - register a device with the system.
2256  * @dev: pointer to the device structure
2257  *
2258  * This happens in two clean steps - initialize the device
2259  * and add it to the system. The two steps can be called
2260  * separately, but this is the easiest and most common.
2261  * I.e. you should only call the two helpers separately if
2262  * you have a clearly defined need to use and refcount the
2263  * device before it is added to the hierarchy.
2264  *
2265  * For more information, see the kerneldoc for device_add().
2266  *
2267  * NOTE: _Never_ directly free @dev after calling this function, even
2268  * if it returned an error! Always use put_device() to give up the
2269  * reference initialized in this function instead.
2270  */
2272 int device_register(struct device *dev)
2273 {
2274         device_initialize(dev);
2275         return device_add(dev);
2276 }
2277 EXPORT_SYMBOL_GPL(device_register);
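
/*
 * Editorial usage sketch (not part of the original file): error handling
 * for device_register().  Because it is device_initialize() + device_add(),
 * a failed registration must still be cleaned up with put_device(), which
 * ends in the ->release() callback.  "bar" is a hypothetical name.
 */
static __maybe_unused int bar_register(struct device *bar_dev)
{
	int err;

	err = device_register(bar_dev);
	if (err) {
		/* never kfree() here; drop the reference from device_initialize() */
		put_device(bar_dev);
		return err;
	}
	return 0;
}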
2278 
2279 /**
2280  * get_device - increment reference count for device.
2281  * @dev: device.
2282  *
2283  * This simply forwards the call to kobject_get(), though
2284  * we do take care to provide for the case that we get a NULL
2285  * pointer passed in.
2286  */
2287 struct device *get_device(struct device *dev)
2288 {
2289         return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
2290 }
2291 EXPORT_SYMBOL_GPL(get_device);
2292 
2293 /**
2294  * put_device - decrement reference count.
2295  * @dev: device in question.
2296  */
2297 void put_device(struct device *dev)
2298 {
2299         /* might_sleep(); */
2300         if (dev)
2301                 kobject_put(&dev->kobj);
2302 }
2303 EXPORT_SYMBOL_GPL(put_device);
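
/*
 * Editorial usage sketch (not part of the original file): every successful
 * get_device() must be balanced by a put_device() once the caller is done
 * with the pointer; both calls tolerate a NULL device.  Hypothetical helper.
 */
static __maybe_unused void example_borrow_parent(struct device *dev)
{
	struct device *parent = get_device(dev->parent);

	if (parent) {
		dev_info(parent, "still referenced by %s\n", dev_name(dev));
		put_device(parent);
	}
}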
2304 
2305 bool kill_device(struct device *dev)
2306 {
2307         /*
2308          * Require the device lock and set the "dead" flag to guarantee that
2309          * the update behavior is consistent with the other bitfields near
2310          * it and that we cannot have an asynchronous probe routine trying
2311          * to run while we are tearing out the bus/class/sysfs from
2312          * underneath the device.
2313          */
2314         lockdep_assert_held(&dev->mutex);
2315 
2316         if (dev->p->dead)
2317                 return false;
2318         dev->p->dead = true;
2319         return true;
2320 }
2321 EXPORT_SYMBOL_GPL(kill_device);
2322 
2323 /**
2324  * device_del - delete device from system.
2325  * @dev: device.
2326  *
2327  * This is the first part of the device unregistration
2328  * sequence. This removes the device from the lists we control
2329  * from here, has it removed from the other driver model
2330  * subsystems it was added to in device_add(), and removes it
2331  * from the kobject hierarchy.
2332  *
2333  * NOTE: this should be called manually _iff_ device_add() was
2334  * also called manually.
2335  */
2336 void device_del(struct device *dev)
2337 {
2338         struct device *parent = dev->parent;
2339         struct kobject *glue_dir = NULL;
2340         struct class_interface *class_intf;
2341 
2342         device_lock(dev);
2343         kill_device(dev);
2344         device_unlock(dev);
2345 
2346         /* Notify clients of device removal.  This call must come
2347          * before dpm_sysfs_remove().
2348          */
2349         if (dev->bus)
2350                 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
2351                                              BUS_NOTIFY_DEL_DEVICE, dev);
2352 
2353         dpm_sysfs_remove(dev);
2354         if (parent)
2355                 klist_del(&dev->p->knode_parent);
2356         if (MAJOR(dev->devt)) {
2357                 devtmpfs_delete_node(dev);
2358                 device_remove_sys_dev_entry(dev);
2359                 device_remove_file(dev, &dev_attr_dev);
2360         }
2361         if (dev->class) {
2362                 device_remove_class_symlinks(dev);
2363 
2364                 mutex_lock(&dev->class->p->mutex);
2365                 /* notify any interfaces that the device is going away */
2366                 list_for_each_entry(class_intf,
2367                                     &dev->class->p->interfaces, node)
2368                         if (class_intf->remove_dev)
2369                                 class_intf->remove_dev(dev, class_intf);
2370                 /* remove the device from the class list */
2371                 klist_del(&dev->p->knode_class);
2372                 mutex_unlock(&dev->class->p->mutex);
2373         }
2374         device_remove_file(dev, &dev_attr_uevent);
2375         device_remove_attrs(dev);
2376         bus_remove_device(dev);
2377         device_pm_remove(dev);
2378         driver_deferred_probe_del(dev);
2379         device_platform_notify(dev, KOBJ_REMOVE);
2380         device_remove_properties(dev);
2381         device_links_purge(dev);
2382 
2383         if (dev->bus)
2384                 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
2385                                              BUS_NOTIFY_REMOVED_DEVICE, dev);
2386         kobject_uevent(&dev->kobj, KOBJ_REMOVE);
2387         glue_dir = get_glue_dir(dev);
2388         kobject_del(&dev->kobj);
2389         cleanup_glue_dir(dev, glue_dir);
2390         put_device(parent);
2391 }
2392 EXPORT_SYMBOL_GPL(device_del);
2393 
2394 /**
2395  * device_unregister - unregister device from system.
2396  * @dev: device going away.
2397  *
2398  * We do this in two parts, like we do device_register(). First,
2399  * we remove it from all the subsystems with device_del(), then
2400  * we decrement the reference count via put_device(). If that
2401  * is the final reference count, the device will be cleaned up
2402  * via device_release() above. Otherwise, the structure will
2403  * stick around until the final reference to the device is dropped.
2404  */
2405 void device_unregister(struct device *dev)
2406 {
2407         pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
2408         device_del(dev);
2409         put_device(dev);
2410 }
2411 EXPORT_SYMBOL_GPL(device_unregister);
2412 
2413 static struct device *prev_device(struct klist_iter *i)
2414 {
2415         struct klist_node *n = klist_prev(i);
2416         struct device *dev = NULL;
2417         struct device_private *p;
2418 
2419         if (n) {
2420                 p = to_device_private_parent(n);
2421                 dev = p->device;
2422         }
2423         return dev;
2424 }
2425 
2426 static struct device *next_device(struct klist_iter *i)
2427 {
2428         struct klist_node *n = klist_next(i);
2429         struct device *dev = NULL;
2430         struct device_private *p;
2431 
2432         if (n) {
2433                 p = to_device_private_parent(n);
2434                 dev = p->device;
2435         }
2436         return dev;
2437 }
2438 
2439 /**
2440  * device_get_devnode - path of device node file
2441  * @dev: device
2442  * @mode: returned file access mode
2443  * @uid: returned file owner
2444  * @gid: returned file group
2445  * @tmp: possibly allocated string
2446  *
2447  * Return the relative path of a possible device node.
2448  * Non-default names may need to allocate a memory to compose
2449  * a name. This memory is returned in tmp and needs to be
2450  * freed by the caller.
2451  */
2452 const char *device_get_devnode(struct device *dev,
2453                                umode_t *mode, kuid_t *uid, kgid_t *gid,
2454                                const char **tmp)
2455 {
2456         char *s;
2457 
2458         *tmp = NULL;
2459 
2460         /* the device type may provide a devnode */
2461         if (dev->type && dev->type->devnode)
2462                 *tmp = dev->type->devnode(dev, mode, uid, gid);
2463         if (*tmp)
2464                 return *tmp;
2465 
2466         /* the class may provide a devnode */
2467         if (dev->class && dev->class->devnode)
2468                 *tmp = dev->class->devnode(dev, mode);
2469         if (*tmp)
2470                 return *tmp;
2471 
2472         /* return name without allocation, tmp == NULL */
2473         if (strchr(dev_name(dev), '!') == NULL)
2474                 return dev_name(dev);
2475 
2476         /* replace '!' in the name with '/' */
2477         s = kstrdup(dev_name(dev), GFP_KERNEL);
2478         if (!s)
2479                 return NULL;
2480         strreplace(s, '!', '/');
2481         return *tmp = s;
2482 }
2483 
2484 /**
2485  * device_for_each_child - device child iterator.
2486  * @parent: parent struct device.
2487  * @fn: function to be called for each device.
2488  * @data: data for the callback.
2489  *
2490  * Iterate over @parent's child devices, and call @fn for each,
2491  * passing it @data.
2492  *
2493  * We check the return of @fn each time. If it returns anything
2494  * other than 0, we break out and return that value.
2495  */
2496 int device_for_each_child(struct device *parent, void *data,
2497                           int (*fn)(struct device *dev, void *data))
2498 {
2499         struct klist_iter i;
2500         struct device *child;
2501         int error = 0;
2502 
2503         if (!parent->p)
2504                 return 0;
2505 
2506         klist_iter_init(&parent->p->klist_children, &i);
2507         while (!error && (child = next_device(&i)))
2508                 error = fn(child, data);
2509         klist_iter_exit(&i);
2510         return error;
2511 }
2512 EXPORT_SYMBOL_GPL(device_for_each_child);
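
/*
 * Editorial usage sketch (not part of the original file): counting the
 * children of a device with device_for_each_child().  Returning non-zero
 * from the callback would stop the iteration early.  Hypothetical names.
 */
static int example_count_child(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* keep iterating */
}

static __maybe_unused unsigned int example_count_children(struct device *parent)
{
	unsigned int count = 0;

	device_for_each_child(parent, &count, example_count_child);
	return count;
}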
2513 
2514 /**
2515  * device_for_each_child_reverse - device child iterator in reversed order.
2516  * @parent: parent struct device.
2517  * @fn: function to be called for each device.
2518  * @data: data for the callback.
2519  *
2520  * Iterate over @parent's child devices in reversed order, and call @fn
2521  * for each, passing it @data.
2522  *
2523  * We check the return of @fn each time. If it returns anything
2524  * other than 0, we break out and return that value.
2525  */
2526 int device_for_each_child_reverse(struct device *parent, void *data,
2527                                   int (*fn)(struct device *dev, void *data))
2528 {
2529         struct klist_iter i;
2530         struct device *child;
2531         int error = 0;
2532 
2533         if (!parent->p)
2534                 return 0;
2535 
2536         klist_iter_init(&parent->p->klist_children, &i);
2537         while ((child = prev_device(&i)) && !error)
2538                 error = fn(child, data);
2539         klist_iter_exit(&i);
2540         return error;
2541 }
2542 EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
2543 
2544 /**
2545  * device_find_child - device iterator for locating a particular device.
2546  * @parent: parent struct device
2547  * @match: Callback function to check device
2548  * @data: Data to pass to match function
2549  *
2550  * This is similar to the device_for_each_child() function above, but it
2551  * returns a reference to a device that is 'found' for later use, as
2552  * determined by the @match callback.
2553  *
2554  * The callback should return 0 if the device doesn't match and non-zero
2555  * if it does.  If the callback returns non-zero and a reference to the
2556  * current device can be obtained, this function will return to the caller
2557  * and not iterate over any more devices.
2558  *
2559  * NOTE: you will need to drop the reference with put_device() after use.
2560  */
2561 struct device *device_find_child(struct device *parent, void *data,
2562                                  int (*match)(struct device *dev, void *data))
2563 {
2564         struct klist_iter i;
2565         struct device *child;
2566 
2567         if (!parent)
2568                 return NULL;
2569 
2570         klist_iter_init(&parent->p->klist_children, &i);
2571         while ((child = next_device(&i)))
2572                 if (match(child, data) && get_device(child))
2573                         break;
2574         klist_iter_exit(&i);
2575         return child;
2576 }
2577 EXPORT_SYMBOL_GPL(device_find_child);
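
/*
 * Editorial usage sketch (not part of the original file): looking up a
 * child by driver-private data with device_find_child().  The match
 * callback returns non-zero on a hit, and the caller owns a reference on
 * the returned device, so it must call put_device().  Names are
 * hypothetical.
 */
static int example_match_drvdata(struct device *dev, void *data)
{
	return dev_get_drvdata(dev) == data;
}

static __maybe_unused void example_use_child(struct device *parent, void *cookie)
{
	struct device *child;

	child = device_find_child(parent, cookie, example_match_drvdata);
	if (!child)
		return;

	dev_info(child, "found child for cookie %p\n", cookie);
	put_device(child);	/* drop the reference taken by device_find_child() */
}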
2578 
2579 /**
2580  * device_find_child_by_name - device iterator for locating a child device.
2581  * @parent: parent struct device
2582  * @name: name of the child device
2583  *
2584  * This is similar to the device_find_child() function above, but it
2585  * returns a reference to a device that has the name @name.
2586  *
2587  * NOTE: you will need to drop the reference with put_device() after use.
2588  */
2589 struct device *device_find_child_by_name(struct device *parent,
2590                                          const char *name)
2591 {
2592         struct klist_iter i;
2593         struct device *child;
2594 
2595         if (!parent)
2596                 return NULL;
2597 
2598         klist_iter_init(&parent->p->klist_children, &i);
2599         while ((child = next_device(&i)))
2600                 if (!strcmp(dev_name(child), name) && get_device(child))
2601                         break;
2602         klist_iter_exit(&i);
2603         return child;
2604 }
2605 EXPORT_SYMBOL_GPL(device_find_child_by_name);
2606 
2607 int __init devices_init(void)
2608 {
2609         devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
2610         if (!devices_kset)
2611                 return -ENOMEM;
2612         dev_kobj = kobject_create_and_add("dev", NULL);
2613         if (!dev_kobj)
2614                 goto dev_kobj_err;
2615         sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
2616         if (!sysfs_dev_block_kobj)
2617                 goto block_kobj_err;
2618         sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
2619         if (!sysfs_dev_char_kobj)
2620                 goto char_kobj_err;
2621 
2622         return 0;
2623 
2624  char_kobj_err:
2625         kobject_put(sysfs_dev_block_kobj);
2626  block_kobj_err:
2627         kobject_put(dev_kobj);
2628  dev_kobj_err:
2629         kset_unregister(devices_kset);
2630         return -ENOMEM;
2631 }
2632 
2633 static int device_check_offline(struct device *dev, void *not_used)
2634 {
2635         int ret;
2636 
2637         ret = device_for_each_child(dev, NULL, device_check_offline);
2638         if (ret)
2639                 return ret;
2640 
2641         return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
2642 }
2643 
2644 /**
2645  * device_offline - Prepare the device for hot-removal.
2646  * @dev: Device to be put offline.
2647  *
2648  * Execute the device bus type's .offline() callback, if present, to prepare
2649  * the device for a subsequent hot-removal.  If that succeeds, the device must
2650  * not be used until either it is removed or its bus type's .online() callback
2651  * is executed.
2652  *
2653  * Call under device_hotplug_lock.
2654  */
2655 int device_offline(struct device *dev)
2656 {
2657         int ret;
2658 
2659         if (dev->offline_disabled)
2660                 return -EPERM;
2661 
2662         ret = device_for_each_child(dev, NULL, device_check_offline);
2663         if (ret)
2664                 return ret;
2665 
2666         device_lock(dev);
2667         if (device_supports_offline(dev)) {
2668                 if (dev->offline) {
2669                         ret = 1;
2670                 } else {
2671                         ret = dev->bus->offline(dev);
2672                         if (!ret) {
2673                                 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2674                                 dev->offline = true;
2675                         }
2676                 }
2677         }
2678         device_unlock(dev);
2679 
2680         return ret;
2681 }
2682 
2683 /**
2684  * device_online - Put the device back online after successful device_offline().
2685  * @dev: Device to be put back online.
2686  *
2687  * If device_offline() has been successfully executed for @dev, but the device
2688  * has not been removed subsequently, execute its bus type's .online() callback
2689  * to indicate that the device can be used again.
2690  *
2691  * Call under device_hotplug_lock.
2692  */
2693 int device_online(struct device *dev)
2694 {
2695         int ret = 0;
2696 
2697         device_lock(dev);
2698         if (device_supports_offline(dev)) {
2699                 if (dev->offline) {
2700                         ret = dev->bus->online(dev);
2701                         if (!ret) {
2702                                 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2703                                 dev->offline = false;
2704                         }
2705                 } else {
2706                         ret = 1;
2707                 }
2708         }
2709         device_unlock(dev);
2710 
2711         return ret;
2712 }
2713 
2714 struct root_device {
2715         struct device dev;
2716         struct module *owner;
2717 };
2718 
2719 static inline struct root_device *to_root_device(struct device *d)
2720 {
2721         return container_of(d, struct root_device, dev);
2722 }
2723 
2724 static void root_device_release(struct device *dev)
2725 {
2726         kfree(to_root_device(dev));
2727 }
2728 
2729 /**
2730  * __root_device_register - allocate and register a root device
2731  * @name: root device name
2732  * @owner: owner module of the root device, usually THIS_MODULE
2733  *
2734  * This function allocates a root device and registers it
2735  * using device_register(). In order to free the returned
2736  * device, use root_device_unregister().
2737  *
2738  * Root devices are dummy devices which allow other devices
2739  * to be grouped under /sys/devices. Use this function to
2740  * allocate a root device and then use it as the parent of
2741  * any device which should appear under /sys/devices/{name}
2742  *
2743  * The /sys/devices/{name} directory will also contain a
2744  * 'module' symlink which points to the @owner directory
2745  * in sysfs.
2746  *
2747  * Returns &struct device pointer on success, or ERR_PTR() on error.
2748  *
2749  * Note: You probably want to use root_device_register().
2750  */
2751 struct device *__root_device_register(const char *name, struct module *owner)
2752 {
2753         struct root_device *root;
2754         int err = -ENOMEM;
2755 
2756         root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
2757         if (!root)
2758                 return ERR_PTR(err);
2759 
2760         err = dev_set_name(&root->dev, "%s", name);
2761         if (err) {
2762                 kfree(root);
2763                 return ERR_PTR(err);
2764         }
2765 
2766         root->dev.release = root_device_release;
2767 
2768         err = device_register(&root->dev);
2769         if (err) {
2770                 put_device(&root->dev);
2771                 return ERR_PTR(err);
2772         }
2773 
2774 #ifdef CONFIG_MODULES   
2775         if (owner) {
2776                 struct module_kobject *mk = &owner->mkobj;
2777 
2778                 err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
2779                 if (err) {
2780                         device_unregister(&root->dev);
2781                         return ERR_PTR(err);
2782                 }
2783                 root->owner = owner;
2784         }
2785 #endif
2786 
2787         return &root->dev;
2788 }
2789 EXPORT_SYMBOL_GPL(__root_device_register);
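
/*
 * Editorial usage sketch (not part of the original file): creating a
 * /sys/devices/<name> grouping node.  Callers normally use the
 * root_device_register() wrapper from <linux/device.h>, which passes
 * THIS_MODULE as @owner.  "foo" is a hypothetical subsystem name.
 */
static struct device *foo_root;

static __maybe_unused int foo_create_root(void)
{
	foo_root = root_device_register("foo");
	if (IS_ERR(foo_root))
		return PTR_ERR(foo_root);
	return 0;
}

static __maybe_unused void foo_destroy_root(void)
{
	root_device_unregister(foo_root);
}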
2790 
2791 /**
2792  * root_device_unregister - unregister and free a root device
2793  * @dev: device going away
2794  *
2795  * This function unregisters and cleans up a device that was created by
2796  * root_device_register().
2797  */
2798 void root_device_unregister(struct device *dev)
2799 {
2800         struct root_device *root = to_root_device(dev);
2801 
2802         if (root->owner)
2803                 sysfs_remove_link(&root->dev.kobj, "module");
2804 
2805         device_unregister(dev);
2806 }
2807 EXPORT_SYMBOL_GPL(root_device_unregister);
2808 
2809 
2810 static void device_create_release(struct device *dev)
2811 {
2812         pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
2813         kfree(dev);
2814 }
2815 
2816 static __printf(6, 0) struct device *
2817 device_create_groups_vargs(struct class *class, struct device *parent,
2818                            dev_t devt, void *drvdata,
2819                            const struct attribute_group **groups,
2820                            const char *fmt, va_list args)
2821 {
2822         struct device *dev = NULL;
2823         int retval = -ENODEV;
2824 
2825         if (class == NULL || IS_ERR(class))
2826                 goto error;
2827 
2828         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2829         if (!dev) {
2830                 retval = -ENOMEM;
2831                 goto error;
2832         }
2833 
2834         device_initialize(dev);
2835         dev->devt = devt;
2836         dev->class = class;
2837         dev->parent = parent;
2838         dev->groups = groups;
2839         dev->release = device_create_release;
2840         dev_set_drvdata(dev, drvdata);
2841 
2842         retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
2843         if (retval)
2844                 goto error;
2845 
2846         retval = device_add(dev);
2847         if (retval)
2848                 goto error;
2849 
2850         return dev;
2851 
2852 error:
2853         put_device(dev);
2854         return ERR_PTR(retval);
2855 }
2856 
2857 /**
2858  * device_create_vargs - creates a device and registers it with sysfs
2859  * @class: pointer to the struct class that this device should be registered to
2860  * @parent: pointer to the parent struct device of this new device, if any
2861  * @devt: the dev_t for the char device to be added
2862  * @drvdata: the data to be added to the device for callbacks
2863  * @fmt: string for the device's name
2864  * @args: va_list for the device's name
2865  *
2866  * This function can be used by char device classes.  A struct device
2867  * will be created in sysfs, registered to the specified class.
2868  *
2869  * A "dev" file will be created, showing the dev_t for the device, if
2870  * the dev_t is not 0,0.
2871  * If a pointer to a parent struct device is passed in, the newly created
2872  * struct device will be a child of that device in sysfs.
2873  * The pointer to the struct device will be returned from the call.
2874  * Any further sysfs files that might be required can be created using this
2875  * pointer.
2876  *
2877  * Returns &struct device pointer on success, or ERR_PTR() on error.
2878  *
2879  * Note: the struct class passed to this function must have previously
2880  * been created with a call to class_create().
2881  */
2882 struct device *device_create_vargs(struct class *class, struct device *parent,
2883                                    dev_t devt, void *drvdata, const char *fmt,
2884                                    va_list args)
2885 {
2886         return device_create_groups_vargs(class, parent, devt, drvdata, NULL,
2887                                           fmt, args);
2888 }
2889 EXPORT_SYMBOL_GPL(device_create_vargs);
2890 
2891 /**
2892  * device_create - creates a device and registers it with sysfs
2893  * @class: pointer to the struct class that this device should be registered to
2894  * @parent: pointer to the parent struct device of this new device, if any
2895  * @devt: the dev_t for the char device to be added
2896  * @drvdata: the data to be added to the device for callbacks
2897  * @fmt: string for the device's name
2898  *
2899  * This function can be used by char device classes.  A struct device
2900  * will be created in sysfs, registered to the specified class.
2901  *
2902  * A "dev" file will be created, showing the dev_t for the device, if
2903  * the dev_t is not 0,0.
2904  * If a pointer to a parent struct device is passed in, the newly created
2905  * struct device will be a child of that device in sysfs.
2906  * The pointer to the struct device will be returned from the call.
2907  * Any further sysfs files that might be required can be created using this
2908  * pointer.
2909  *
2910  * Returns &struct device pointer on success, or ERR_PTR() on error.
2911  *
2912  * Note: the struct class passed to this function must have previously
2913  * been created with a call to class_create().
2914  */
2915 struct device *device_create(struct class *class, struct device *parent,
2916                              dev_t devt, void *drvdata, const char *fmt, ...)
2917 {
2918         va_list vargs;
2919         struct device *dev;
2920 
2921         va_start(vargs, fmt);
2922         dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
2923         va_end(vargs);
2924         return dev;
2925 }
2926 EXPORT_SYMBOL_GPL(device_create);
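
/*
 * Editorial usage sketch (not part of the original file): the classic
 * char-device pattern around device_create()/device_destroy().  The class
 * is assumed to have been set up earlier with class_create() and the dev_t
 * with alloc_chrdev_region(); the "baz" names are hypothetical.
 */
static struct class *baz_class;
static dev_t baz_devt;

static __maybe_unused struct device *baz_create_node(void *drvdata)
{
	/* creates /sys/class/baz/baz0 and lets userspace create /dev/baz0 */
	return device_create(baz_class, NULL, baz_devt, drvdata, "baz%d", 0);
}

static __maybe_unused void baz_destroy_node(void)
{
	device_destroy(baz_class, baz_devt);
}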
2927 
2928 /**
2929  * device_create_with_groups - creates a device and registers it with sysfs
2930  * @class: pointer to the struct class that this device should be registered to
2931  * @parent: pointer to the parent struct device of this new device, if any
2932  * @devt: the dev_t for the char device to be added
2933  * @drvdata: the data to be added to the device for callbacks
2934  * @groups: NULL-terminated list of attribute groups to be created
2935  * @fmt: string for the device's name
2936  *
2937  * This function can be used by char device classes.  A struct device
2938  * will be created in sysfs, registered to the specified class.
2939  * Additional attributes specified in the groups parameter will also
2940  * be created automatically.
2941  *
2942  * A "dev" file will be created, showing the dev_t for the device, if
2943  * the dev_t is not 0,0.
2944  * If a pointer to a parent struct device is passed in, the newly created
2945  * struct device will be a child of that device in sysfs.
2946  * The pointer to the struct device will be returned from the call.
2947  * Any further sysfs files that might be required can be created using this
2948  * pointer.
2949  *
2950  * Returns &struct device pointer on success, or ERR_PTR() on error.
2951  *
2952  * Note: the struct class passed to this function must have previously
2953  * been created with a call to class_create().
2954  */
2955 struct device *device_create_with_groups(struct class *class,
2956                                          struct device *parent, dev_t devt,
2957                                          void *drvdata,
2958                                          const struct attribute_group **groups,
2959                                          const char *fmt, ...)
2960 {
2961         va_list vargs;
2962         struct device *dev;
2963 
2964         va_start(vargs, fmt);
2965         dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
2966                                          fmt, vargs);
2967         va_end(vargs);
2968         return dev;
2969 }
2970 EXPORT_SYMBOL_GPL(device_create_with_groups);
2971 
2972 /**
2973  * device_destroy - removes a device that was created with device_create()
2974  * @class: pointer to the struct class that this device was registered with
2975  * @devt: the dev_t of the device that was previously registered
2976  *
2977  * This call unregisters and cleans up a device that was created with a
2978  * call to device_create().
2979  */
2980 void device_destroy(struct class *class, dev_t devt)
2981 {
2982         struct device *dev;
2983 
2984         dev = class_find_device_by_devt(class, devt);
2985         if (dev) {
2986                 put_device(dev);
2987                 device_unregister(dev);
2988         }
2989 }
2990 EXPORT_SYMBOL_GPL(device_destroy);
2991 
2992 /**
2993  * device_rename - renames a device
2994  * @dev: the pointer to the struct device to be renamed
2995  * @new_name: the new name of the device
2996  *
2997  * It is the responsibility of the caller to provide mutual
2998  * exclusion between two different calls of device_rename
2999  * on the same device to ensure that new_name is valid and
3000  * won't conflict with other devices.
3001  *
3002  * Note: avoid this function in new code; renaming devices is racy at
3003  * many levels.  Symlinks and other sysfs entries are not replaced
3004  * atomically, a "move" uevent is emitted but it is hard for userspace
3005  * to connect it to the old and new names, and device nodes are not
3006  * renamed at all.  The networking layer still calls this function for
3007  * interface renames, but that is expected to change.
3008  */
3031 int device_rename(struct device *dev, const char *new_name)
3032 {
3033         struct kobject *kobj = &dev->kobj;
3034         char *old_device_name = NULL;
3035         int error;
3036 
3037         dev = get_device(dev);
3038         if (!dev)
3039                 return -EINVAL;
3040 
3041         dev_dbg(dev, "renaming to %s\n", new_name);
3042 
3043         old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
3044         if (!old_device_name) {
3045                 error = -ENOMEM;
3046                 goto out;
3047         }
3048 
3049         if (dev->class) {
3050                 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
3051                                              kobj, old_device_name,
3052                                              new_name, kobject_namespace(kobj));
3053                 if (error)
3054                         goto out;
3055         }
3056 
3057         error = kobject_rename(kobj, new_name);
3058         if (error)
3059                 goto out;
3060 
3061 out:
3062         put_device(dev);
3063 
3064         kfree(old_device_name);
3065 
3066         return error;
3067 }
3068 EXPORT_SYMBOL_GPL(device_rename);
3069 
3070 static int device_move_class_links(struct device *dev,
3071                                    struct device *old_parent,
3072                                    struct device *new_parent)
3073 {
3074         int error = 0;
3075 
3076         if (old_parent)
3077                 sysfs_remove_link(&dev->kobj, "device");
3078         if (new_parent)
3079                 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
3080                                           "device");
3081         return error;
3082 }
3083 
3084 /**
3085  * device_move - moves a device to a new parent
3086  * @dev: the pointer to the struct device to be moved
3087  * @new_parent: the new parent of the device (can be NULL)
3088  * @dpm_order: how to reorder the dpm_list
3089  */
3090 int device_move(struct device *dev, struct device *new_parent,
3091                 enum dpm_order dpm_order)
3092 {
3093         int error;
3094         struct device *old_parent;
3095         struct kobject *new_parent_kobj;
3096 
3097         dev = get_device(dev);
3098         if (!dev)
3099                 return -EINVAL;
3100 
3101         device_pm_lock();
3102         new_parent = get_device(new_parent);
3103         new_parent_kobj = get_device_parent(dev, new_parent);
3104         if (IS_ERR(new_parent_kobj)) {
3105                 error = PTR_ERR(new_parent_kobj);
3106                 put_device(new_parent);
3107                 goto out;
3108         }
3109 
3110         pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
3111                  __func__, new_parent ? dev_name(new_parent) : "<NULL>");
3112         error = kobject_move(&dev->kobj, new_parent_kobj);
3113         if (error) {
3114                 cleanup_glue_dir(dev, new_parent_kobj);
3115                 put_device(new_parent);
3116                 goto out;
3117         }
3118         old_parent = dev->parent;
3119         dev->parent = new_parent;
3120         if (old_parent)
3121                 klist_remove(&dev->p->knode_parent);
3122         if (new_parent) {
3123                 klist_add_tail(&dev->p->knode_parent,
3124                                &new_parent->p->klist_children);
3125                 set_dev_node(dev, dev_to_node(new_parent));
3126         }
3127 
3128         if (dev->class) {
3129                 error = device_move_class_links(dev, old_parent, new_parent);
3130                 if (error) {
3131                         /* We ignore errors on cleanup since we're hosed anyway... */
3132                         device_move_class_links(dev, new_parent, old_parent);
3133                         if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
3134                                 if (new_parent)
3135                                         klist_remove(&dev->p->knode_parent);
3136                                 dev->parent = old_parent;
3137                                 if (old_parent) {
3138                                         klist_add_tail(&dev->p->knode_parent,
3139                                                        &old_parent->p->klist_children);
3140                                         set_dev_node(dev, dev_to_node(old_parent));
3141                                 }
3142                         }
3143                         cleanup_glue_dir(dev, new_parent_kobj);
3144                         put_device(new_parent);
3145                         goto out;
3146                 }
3147         }
3148         switch (dpm_order) {
3149         case DPM_ORDER_NONE:
3150                 break;
3151         case DPM_ORDER_DEV_AFTER_PARENT:
3152                 device_pm_move_after(dev, new_parent);
3153                 devices_kset_move_after(dev, new_parent);
3154                 break;
3155         case DPM_ORDER_PARENT_BEFORE_DEV:
3156                 device_pm_move_before(new_parent, dev);
3157                 devices_kset_move_before(new_parent, dev);
3158                 break;
3159         case DPM_ORDER_DEV_LAST:
3160                 device_pm_move_last(dev);
3161                 devices_kset_move_last(dev);
3162                 break;
3163         }
3164 
3165         put_device(old_parent);
3166 out:
3167         device_pm_unlock();
3168         put_device(dev);
3169         return error;
3170 }
3171 EXPORT_SYMBOL_GPL(device_move);
3172 
3173 /**
3174  * device_shutdown - call ->shutdown() on each device to shutdown.
3175  */
3176 void device_shutdown(void)
3177 {
3178         struct device *dev, *parent;
3179 
3180         wait_for_device_probe();
3181         device_block_probing();
3182 
3183         cpufreq_suspend();
3184 
3185         spin_lock(&devices_kset->list_lock);
3186         /*
3187          * Walk the devices list backwards, shutting down each in turn.
3188          * Beware that device unplug events may also start pulling
3189          * devices offline, even as the system is shutting down.
3190          */
3191         while (!list_empty(&devices_kset->list)) {
3192                 dev = list_entry(devices_kset->list.prev, struct device,
3193                                 kobj.entry);
3194 
3195                 /*
3196                  * hold reference count of device's parent to
3197                  * prevent it from being freed because parent's
3198                  * lock is to be held
3199                  */
3200                 parent = get_device(dev->parent);
3201                 get_device(dev);
3202                 /*
3203                  * Make sure the device is off the kset list, in the
3204                  * event that dev->*->shutdown() doesn't remove it.
3205                  */
3206                 list_del_init(&dev->kobj.entry);
3207                 spin_unlock(&devices_kset->list_lock);
3208 
3209                 /* hold lock to avoid race with probe/release */
3210                 if (parent)
3211                         device_lock(parent);
3212                 device_lock(dev);
3213 
3214                 /* Don't allow any more runtime suspends */
3215                 pm_runtime_get_noresume(dev);
3216                 pm_runtime_barrier(dev);
3217 
3218                 if (dev->class && dev->class->shutdown_pre) {
3219                         if (initcall_debug)
3220                                 dev_info(dev, "shutdown_pre\n");
3221                         dev->class->shutdown_pre(dev);
3222                 }
3223                 if (dev->bus && dev->bus->shutdown) {
3224                         if (initcall_debug)
3225                                 dev_info(dev, "shutdown\n");
3226                         dev->bus->shutdown(dev);
3227                 } else if (dev->driver && dev->driver->shutdown) {
3228                         if (initcall_debug)
3229                                 dev_info(dev, "shutdown\n");
3230                         dev->driver->shutdown(dev);
3231                 }
3232 
3233                 device_unlock(dev);
3234                 if (parent)
3235                         device_unlock(parent);
3236 
3237                 put_device(dev);
3238                 put_device(parent);
3239 
3240                 spin_lock(&devices_kset->list_lock);
3241         }
3242         spin_unlock(&devices_kset->list_lock);
3243 }
3244 
3245 
3246 /*
3247  * Device logging functions
3248  */
3249 #ifdef CONFIG_PRINTK
3250 static int
3251 create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
3252 {
3253         const char *subsys;
3254         size_t pos = 0;
3255 
3256         if (dev->class)
3257                 subsys = dev->class->name;
3258         else if (dev->bus)
3259                 subsys = dev->bus->name;
3260         else
3261                 return 0;
3262 
3263         pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
3264         if (pos >= hdrlen)
3265                 goto overflow;
3266 
3267         /*
3268          * Add device identifier DEVICE=:
3269          *   b12:8         block dev_t
3270          *   c127:3        char dev_t
3271          *   n8            netdev ifindex
3272          *   +sound:card0  subsystem:devname
3273          */
3274         if (MAJOR(dev->devt)) {
3275                 char c;
3276 
3277                 if (strcmp(subsys, "block") == 0)
3278                         c = 'b';
3279                 else
3280                         c = 'c';
3281                 pos++;
3282                 pos += snprintf(hdr + pos, hdrlen - pos,
3283                                 "DEVICE=%c%u:%u",
3284                                 c, MAJOR(dev->devt), MINOR(dev->devt));
3285         } else if (strcmp(subsys, "net") == 0) {
3286                 struct net_device *net = to_net_dev(dev);
3287 
3288                 pos++;
3289                 pos += snprintf(hdr + pos, hdrlen - pos,
3290                                 "DEVICE=n%u", net->ifindex);
3291         } else {
3292                 pos++;
3293                 pos += snprintf(hdr + pos, hdrlen - pos,
3294                                 "DEVICE=+%s:%s", subsys, dev_name(dev));
3295         }
3296 
3297         if (pos >= hdrlen)
3298                 goto overflow;
3299 
3300         return pos;
3301 
3302 overflow:
3303         dev_WARN(dev, "device/subsystem name too long");
3304         return 0;
3305 }
3306 
3307 int dev_vprintk_emit(int level, const struct device *dev,
3308                      const char *fmt, va_list args)
3309 {
3310         char hdr[128];
3311         size_t hdrlen;
3312 
3313         hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
3314 
3315         return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
3316 }
3317 EXPORT_SYMBOL(dev_vprintk_emit);
3318 
3319 int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
3320 {
3321         va_list args;
3322         int r;
3323 
3324         va_start(args, fmt);
3325 
3326         r = dev_vprintk_emit(level, dev, fmt, args);
3327 
3328         va_end(args);
3329 
3330         return r;
3331 }
3332 EXPORT_SYMBOL(dev_printk_emit);
3333 
3334 static void __dev_printk(const char *level, const struct device *dev,
3335                         struct va_format *vaf)
3336 {
3337         if (dev)
3338                 dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
3339                                 dev_driver_string(dev), dev_name(dev), vaf);
3340         else
3341                 printk("%s(NULL device *): %pV", level, vaf);
3342 }
3343 
3344 void dev_printk(const char *level, const struct device *dev,
3345                 const char *fmt, ...)
3346 {
3347         struct va_format vaf;
3348         va_list args;
3349 
3350         va_start(args, fmt);
3351 
3352         vaf.fmt = fmt;
3353         vaf.va = &args;
3354 
3355         __dev_printk(level, dev, &vaf);
3356 
3357         va_end(args);
3358 }
3359 EXPORT_SYMBOL(dev_printk);
3360 
3361 #define define_dev_printk_level(func, kern_level)               \
3362 void func(const struct device *dev, const char *fmt, ...)       \
3363 {                                                               \
3364         struct va_format vaf;                                   \
3365         va_list args;                                           \
3366                                                                 \
3367         va_start(args, fmt);                                    \
3368                                                                 \
3369         vaf.fmt = fmt;                                          \
3370         vaf.va = &args;                                         \
3371                                                                 \
3372         __dev_printk(kern_level, dev, &vaf);                    \
3373                                                                 \
3374         va_end(args);                                           \
3375 }                                                               \
3376 EXPORT_SYMBOL(func);
3377 
3378 define_dev_printk_level(_dev_emerg, KERN_EMERG);
3379 define_dev_printk_level(_dev_alert, KERN_ALERT);
3380 define_dev_printk_level(_dev_crit, KERN_CRIT);
3381 define_dev_printk_level(_dev_err, KERN_ERR);
3382 define_dev_printk_level(_dev_warn, KERN_WARNING);
3383 define_dev_printk_level(_dev_notice, KERN_NOTICE);
3384 define_dev_printk_level(_dev_info, KERN_INFO);
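
/*
 * Editorial usage sketch (not part of the original file): drivers normally
 * use the dev_emerg()..dev_info() wrapper macros from <linux/device.h>,
 * which expand to the _dev_*() helpers generated above, rather than calling
 * dev_printk() directly.  Each message is then prefixed with the driver and
 * device name and carries the SUBSYSTEM=/DEVICE= metadata built by
 * create_syslog_header().  Hypothetical example:
 *
 *	dev_info(&pdev->dev, "version %d.%d probed\n", major, minor);
 *	dev_err(&pdev->dev, "DMA setup failed: %d\n", err);
 */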
3385 
3386 #endif
3387 
3388 static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
3389 {
3390         return fwnode && !IS_ERR(fwnode->secondary);
3391 }
3392 
3393 /**
3394  * set_primary_fwnode - Change the primary firmware node of a given device.
3395  * @dev: Device to handle.
3396  * @fwnode: New primary firmware node of the device.
3397  *
3398  * Set the device's firmware node pointer to @fwnode, but if a secondary
3399  * firmware node of the device is present, preserve it.
3400  */
3401 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
3402 {
3403         if (fwnode) {
3404                 struct fwnode_handle *fn = dev->fwnode;
3405 
3406                 if (fwnode_is_primary(fn))
3407                         fn = fn->secondary;
3408 
3409                 if (fn) {
3410                         WARN_ON(fwnode->secondary);
3411                         fwnode->secondary = fn;
3412                 }
3413                 dev->fwnode = fwnode;
3414         } else {
3415                 dev->fwnode = fwnode_is_primary(dev->fwnode) ?
3416                         dev->fwnode->secondary : NULL;
3417         }
3418 }
3419 EXPORT_SYMBOL_GPL(set_primary_fwnode);
3420 
3421 /**
3422  * set_secondary_fwnode - Change the secondary firmware node of a given device.
3423  * @dev: Device to handle.
3424  * @fwnode: New secondary firmware node of the device.
3425  *
3426  * If a primary firmware node of the device is present, set its secondary
3427  * pointer to @fwnode.  Otherwise, set the device's firmware node pointer to
3428  * @fwnode.
3429  */
3430 void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
3431 {
3432         if (fwnode)
3433                 fwnode->secondary = ERR_PTR(-ENODEV);
3434 
3435         if (fwnode_is_primary(dev->fwnode))
3436                 dev->fwnode->secondary = fwnode;
3437         else
3438                 dev->fwnode = fwnode;
3439 }
3440 
3441 /**
3442  * device_set_of_node_from_dev - reuse device-tree node of another device
3443  * @dev: device whose device-tree node is being set
3444  * @dev2: device whose device-tree node is being reused
3445  *
3446  * Takes another reference to the new device-tree node after first dropping
3447  * any reference held to the old node.
3448  */
3449 void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
3450 {
3451         of_node_put(dev->of_node);
3452         dev->of_node = of_node_get(dev2->of_node);
3453         dev->of_node_reused = true;
3454 }
3455 EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
3456 
3457 int device_match_name(struct device *dev, const void *name)
3458 {
3459         return sysfs_streq(dev_name(dev), name);
3460 }
3461 EXPORT_SYMBOL_GPL(device_match_name);
3462 
3463 int device_match_of_node(struct device *dev, const void *np)
3464 {
3465         return dev->of_node == np;
3466 }
3467 EXPORT_SYMBOL_GPL(device_match_of_node);
3468 
3469 int device_match_fwnode(struct device *dev, const void *fwnode)
3470 {
3471         return dev_fwnode(dev) == fwnode;
3472 }
3473 EXPORT_SYMBOL_GPL(device_match_fwnode);
3474 
3475 int device_match_devt(struct device *dev, const void *pdevt)
3476 {
3477         return dev->devt == *(dev_t *)pdevt;
3478 }
3479 EXPORT_SYMBOL_GPL(device_match_devt);
3480 
3481 int device_match_acpi_dev(struct device *dev, const void *adev)
3482 {
3483         return ACPI_COMPANION(dev) == adev;
3484 }
3485 EXPORT_SYMBOL(device_match_acpi_dev);
3486 
3487 int device_match_any(struct device *dev, const void *unused)
3488 {
3489         return 1;
3490 }
3491 EXPORT_SYMBOL_GPL(device_match_any);
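
/*
 * Editorial usage sketch (not part of the original file): the
 * device_match_*() helpers above are meant to be passed to lookup functions
 * such as bus_find_device() or class_find_device().  The returned device,
 * if any, comes with a reference that the caller must drop with
 * put_device().  The bus argument is left to the caller here on purpose.
 */
static __maybe_unused struct device *
example_find_by_of_node(struct bus_type *bus, struct device_node *np)
{
	/* device_match_of_node() compares dev->of_node against @np */
	return bus_find_device(bus, NULL, np, device_match_of_node);
}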