This source file includes the following definitions (a brief usage sketch follows the list):
- major_to_index
- chrdev_show
- find_dynamic_major
- __register_chrdev_region
- __unregister_chrdev_region
- register_chrdev_region
- alloc_chrdev_region
- __register_chrdev
- unregister_chrdev_region
- __unregister_chrdev
- cdev_get
- cdev_put
- chrdev_open
- cd_forget
- cdev_purge
- exact_match
- exact_lock
- cdev_add
- cdev_set_parent
- cdev_device_add
- cdev_device_del
- cdev_unmap
- cdev_del
- cdev_default_release
- cdev_dynamic_release
- cdev_alloc
- cdev_init
- base_probe
- chrdev_init
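
As a quick orientation before the listing: a driver normally consumes this API by reserving a device-number range, initializing a struct cdev with its file_operations, and activating it with cdev_add(). The sketch below is illustrative only and is not part of this file; the "demo" name, demo_fops and DEMO_MINORS are hypothetical placeholders.

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>

#define DEMO_MINORS 1

static dev_t demo_devt;
static struct cdev demo_cdev;

static const struct file_operations demo_fops = {
        .owner  = THIS_MODULE,
        .llseek = noop_llseek,  /* a real driver fills in open/read/write/... */
};

static int __init demo_init(void)
{
        int err;

        /* Dynamically allocate one major with DEMO_MINORS minors. */
        err = alloc_chrdev_region(&demo_devt, 0, DEMO_MINORS, "demo");
        if (err)
                return err;

        cdev_init(&demo_cdev, &demo_fops);
        demo_cdev.owner = THIS_MODULE;

        /* The device is live (openable) as soon as cdev_add() succeeds. */
        err = cdev_add(&demo_cdev, demo_devt, DEMO_MINORS);
        if (err)
                unregister_chrdev_region(demo_devt, DEMO_MINORS);
        return err;
}

static void __exit demo_exit(void)
{
        cdev_del(&demo_cdev);
        unregister_chrdev_region(demo_devt, DEMO_MINORS);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Drivers that embed the cdev in a structure whose lifetime is managed by a struct device should prefer cdev_device_add()/cdev_device_del() below, which combine these steps with device_add()/device_del().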
   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  *  linux/fs/char_dev.c
   4  *
   5  *  Copyright (C) 1991, 1992  Linus Torvalds
   6  */
   7 
   8 #include <linux/init.h>
   9 #include <linux/fs.h>
  10 #include <linux/kdev_t.h>
  11 #include <linux/slab.h>
  12 #include <linux/string.h>
  13 
  14 #include <linux/major.h>
  15 #include <linux/errno.h>
  16 #include <linux/module.h>
  17 #include <linux/seq_file.h>
  18 
  19 #include <linux/kobject.h>
  20 #include <linux/kobj_map.h>
  21 #include <linux/cdev.h>
  22 #include <linux/mutex.h>
  23 #include <linux/backing-dev.h>
  24 #include <linux/tty.h>
  25 
  26 #include "internal.h"
  27 
  28 static struct kobj_map *cdev_map;
  29 
  30 static DEFINE_MUTEX(chrdevs_lock);
  31 
  32 #define CHRDEV_MAJOR_HASH_SIZE 255
  33 
  34 static struct char_device_struct {
  35         struct char_device_struct *next;
  36         unsigned int major;
  37         unsigned int baseminor;
  38         int minorct;
  39         char name[64];
  40         struct cdev *cdev;              /* will die */
  41 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
  42 
  43 /* index in the above */
  44 static inline int major_to_index(unsigned major)
  45 {
  46         return major % CHRDEV_MAJOR_HASH_SIZE;
  47 }
  48 
  49 #ifdef CONFIG_PROC_FS
  50 
  51 void chrdev_show(struct seq_file *f, off_t offset)
  52 {
  53         struct char_device_struct *cd;
  54 
  55         mutex_lock(&chrdevs_lock);
  56         for (cd = chrdevs[major_to_index(offset)]; cd; cd = cd->next) {
  57                 if (cd->major == offset)
  58                         seq_printf(f, "%3d %s\n", cd->major, cd->name);
  59         }
  60         mutex_unlock(&chrdevs_lock);
  61 }
  62 
  63 #endif /* CONFIG_PROC_FS */
  64 
  65 static int find_dynamic_major(void)
  66 {
  67         int i;
  68         struct char_device_struct *cd;
  69 
  70         for (i = ARRAY_SIZE(chrdevs)-1; i >= CHRDEV_MAJOR_DYN_END; i--) {
  71                 if (chrdevs[i] == NULL)
  72                         return i;
  73         }
  74 
  75         for (i = CHRDEV_MAJOR_DYN_EXT_START;
  76              i >= CHRDEV_MAJOR_DYN_EXT_END; i--) {
  77                 for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
  78                         if (cd->major == i)
  79                                 break;
  80 
  81                 if (cd == NULL)
  82                         return i;
  83         }
  84 
  85         return -EBUSY;
  86 }
  87 
  88 /*
  89  * Register a single major with a specified minor range.
  90  *
  91  * If major == 0 this function will dynamically allocate an unused major.
  92  * If major > 0 this function will attempt to reserve the range of minors
  93  * with given major.
  94  *
  95  */
  96 static struct char_device_struct *
  97 __register_chrdev_region(unsigned int major, unsigned int baseminor,
  98                            int minorct, const char *name)
  99 {
 100         struct char_device_struct *cd, *curr, *prev = NULL;
 101         int ret;
 102         int i;
 103 
 104         if (major >= CHRDEV_MAJOR_MAX) {
 105                 pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
 106                        name, major, CHRDEV_MAJOR_MAX-1);
 107                 return ERR_PTR(-EINVAL);
 108         }
 109 
 110         if (minorct > MINORMASK + 1 - baseminor) {
 111                 pr_err("CHRDEV \"%s\" minor range requested (%u-%u) is out of range of maximum range (%u-%u) for a single major\n",
 112                         name, baseminor, baseminor + minorct - 1, 0, MINORMASK);
 113                 return ERR_PTR(-EINVAL);
 114         }
 115 
 116         cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
 117         if (cd == NULL)
 118                 return ERR_PTR(-ENOMEM);
 119 
 120         mutex_lock(&chrdevs_lock);
 121 
 122         if (major == 0) {
 123                 ret = find_dynamic_major();
 124                 if (ret < 0) {
 125                         pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
 126                                name);
 127                         goto out;
 128                 }
 129                 major = ret;
 130         }
 131 
 132         ret = -EBUSY;
 133         i = major_to_index(major);
 134         for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
 135                 if (curr->major < major)
 136                         continue;
 137 
 138                 if (curr->major > major)
 139                         break;
 140 
 141                 if (curr->baseminor + curr->minorct <= baseminor)
 142                         continue;
 143 
 144                 if (curr->baseminor >= baseminor + minorct)
 145                         break;
 146 
 147                 goto out;
 148         }
 149 
 150         cd->major = major;
 151         cd->baseminor = baseminor;
 152         cd->minorct = minorct;
 153         strlcpy(cd->name, name, sizeof(cd->name));
 154 
 155         if (!prev) {
 156                 cd->next = curr;
 157                 chrdevs[i] = cd;
 158         } else {
 159                 cd->next = prev->next;
 160                 prev->next = cd;
 161         }
 162 
 163         mutex_unlock(&chrdevs_lock);
 164         return cd;
 165 out:
 166         mutex_unlock(&chrdevs_lock);
 167         kfree(cd);
 168         return ERR_PTR(ret);
 169 }
 170 
 171 static struct char_device_struct *
 172 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
 173 {
 174         struct char_device_struct *cd = NULL, **cp;
 175         int i = major_to_index(major);
 176 
 177         mutex_lock(&chrdevs_lock);
 178         for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
 179                 if ((*cp)->major == major &&
 180                     (*cp)->baseminor == baseminor &&
 181                     (*cp)->minorct == minorct)
 182                         break;
 183         if (*cp) {
 184                 cd = *cp;
 185                 *cp = cd->next;
 186         }
 187         mutex_unlock(&chrdevs_lock);
 188         return cd;
 189 }
 190 
 191 /**
 192  * register_chrdev_region() - register a range of device numbers
 193  * @from: the first in the desired range of device numbers; must include
 194  *        the major number.
 195  * @count: the number of consecutive device numbers required
 196  * @name: the name of the device or driver.
 197  *
 198  * Return value is zero on success, a negative error code on failure.
 199  */
 200 int register_chrdev_region(dev_t from, unsigned count, const char *name)
 201 {
 202         struct char_device_struct *cd;
 203         dev_t to = from + count;
 204         dev_t n, next;
 205 
 206         for (n = from; n < to; n = next) {
 207                 next = MKDEV(MAJOR(n)+1, 0);
 208                 if (next > to)
 209                         next = to;
 210                 cd = __register_chrdev_region(MAJOR(n), MINOR(n),
 211                                next - n, name);
 212                 if (IS_ERR(cd))
 213                         goto fail;
 214         }
 215         return 0;
 216 fail:
 217         to = n;
 218         for (n = from; n < to; n = next) {
 219                 next = MKDEV(MAJOR(n)+1, 0);
 220                 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
 221         }
 222         return PTR_ERR(cd);
 223 }
 224 
 225 /**
 226  * alloc_chrdev_region() - register a range of char device numbers
 227  * @dev: output parameter for first assigned number
 228  * @baseminor: first of the requested range of minor numbers
 229  * @count: the number of minor numbers required
 230  * @name: the name of the associated device or driver
 231  *
 232  * Allocates a range of char device numbers.  The major number will be
 233  * chosen dynamically, and returned (along with the first minor number)
 234  * in @dev.  Returns zero or a negative error code.
 235  */
 236 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
 237                         const char *name)
 238 {
 239         struct char_device_struct *cd;
 240         cd = __register_chrdev_region(0, baseminor, count, name);
 241         if (IS_ERR(cd))
 242                 return PTR_ERR(cd);
 243         *dev = MKDEV(cd->major, cd->baseminor);
 244         return 0;
 245 }
 246 
 247 /**
 248  * __register_chrdev() - create and register a cdev occupying a range of minors
 249  * @major: major device number or 0 for dynamic allocation
 250  * @baseminor: first of the requested range of minor numbers
 251  * @count: the number of minor numbers required
 252  * @name: name of this range of devices
 253  * @fops: file operations associated with the devices
 254  *
 255  * If @major == 0 this function will dynamically allocate a major and return
 256  * its number.
 257  *
 258  * If @major > 0 this function will attempt to reserve a device with the given
 259  * major number and will return zero on success.
 260  *
 261  * Returns a negative errno on failure.
 262  *
 263  * The name of this device has nothing to do with the name of the device in
 264  * /dev. It only helps to keep track of the different owners of devices. If
 265  * your module name has only one type of devices it's ok to use e.g. the name
 266  * of the module here.
 267  */
 268 int __register_chrdev(unsigned int major, unsigned int baseminor,
 269                       unsigned int count, const char *name,
 270                       const struct file_operations *fops)
 271 {
 272         struct char_device_struct *cd;
 273         struct cdev *cdev;
 274         int err = -ENOMEM;
 275 
 276         cd = __register_chrdev_region(major, baseminor, count, name);
 277         if (IS_ERR(cd))
 278                 return PTR_ERR(cd);
 279 
 280         cdev = cdev_alloc();
 281         if (!cdev)
 282                 goto out2;
 283 
 284         cdev->owner = fops->owner;
 285         cdev->ops = fops;
 286         kobject_set_name(&cdev->kobj, "%s", name);
 287 
 288         err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
 289         if (err)
 290                 goto out;
 291 
 292         cd->cdev = cdev;
 293 
 294         return major ? 0 : cd->major;
 295 out:
 296         kobject_put(&cdev->kobj);
 297 out2:
 298         kfree(__unregister_chrdev_region(cd->major, baseminor, count));
 299         return err;
 300 }
 301 
 302 /**
 303  * unregister_chrdev_region() - unregister a range of device numbers
 304  * @from: the first in the range of numbers to unregister
 305  * @count: the number of device numbers to unregister
 306  *
 307  * This function will unregister a range of @count device numbers,
 308  * starting with @from.  The caller should normally be the one who
 309  * allocated those numbers in the first place.
 310  */
 311 void unregister_chrdev_region(dev_t from, unsigned count)
 312 {
 313         dev_t to = from + count;
 314         dev_t n, next;
 315 
 316         for (n = from; n < to; n = next) {
 317                 next = MKDEV(MAJOR(n)+1, 0);
 318                 if (next > to)
 319                         next = to;
 320                 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
 321         }
 322 }
 323 
 324 /**
 325  * __unregister_chrdev() - unregister and destroy a cdev
 326  * @major: major device number
 327  * @baseminor: first of the range of minor numbers
 328  * @count: the number of minor numbers this cdev is occupying
 329  * @name: name of this range of devices
 330  *
 331  * Unregister and destroy the cdev occupying the region described by
 332  * @major, @baseminor and @count.  This function undoes what
 333  * __register_chrdev() did.
 334  */
 335 void __unregister_chrdev(unsigned int major, unsigned int baseminor,
 336                          unsigned int count, const char *name)
 337 {
 338         struct char_device_struct *cd;
 339 
 340         cd = __unregister_chrdev_region(major, baseminor, count);
 341         if (cd && cd->cdev)
 342                 cdev_del(cd->cdev);
 343         kfree(cd);
 344 }
 345 
 346 static DEFINE_SPINLOCK(cdev_lock);
 347 
 348 static struct kobject *cdev_get(struct cdev *p)
 349 {
 350         struct module *owner = p->owner;
 351         struct kobject *kobj;
 352 
 353         if (owner && !try_module_get(owner))
 354                 return NULL;
 355         kobj = kobject_get_unless_zero(&p->kobj);
 356         if (!kobj)
 357                 module_put(owner);
 358         return kobj;
 359 }
 360 
 361 void cdev_put(struct cdev *p)
 362 {
 363         if (p) {
 364                 struct module *owner = p->owner;
 365                 kobject_put(&p->kobj);
 366                 module_put(owner);
 367         }
 368 }
 369 
 370 /*
 371  * Called every time a character special file is opened.
 372  */
 373 static int chrdev_open(struct inode *inode, struct file *filp)
 374 {
 375         const struct file_operations *fops;
 376         struct cdev *p;
 377         struct cdev *new = NULL;
 378         int ret = 0;
 379 
 380         spin_lock(&cdev_lock);
 381         p = inode->i_cdev;
 382         if (!p) {
 383                 struct kobject *kobj;
 384                 int idx;
 385                 spin_unlock(&cdev_lock);
 386                 kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
 387                 if (!kobj)
 388                         return -ENXIO;
 389                 new = container_of(kobj, struct cdev, kobj);
 390                 spin_lock(&cdev_lock);
 391                 /* Check i_cdev again in case somebody beat us to it while
 392                    we dropped the lock. */
 393                 p = inode->i_cdev;
 394                 if (!p) {
 395                         inode->i_cdev = p = new;
 396                         list_add(&inode->i_devices, &p->list);
 397                         new = NULL;
 398                 } else if (!cdev_get(p))
 399                         ret = -ENXIO;
 400         } else if (!cdev_get(p))
 401                 ret = -ENXIO;
 402         spin_unlock(&cdev_lock);
 403         cdev_put(new);
 404         if (ret)
 405                 return ret;
 406 
 407         ret = -ENXIO;
 408         fops = fops_get(p->ops);
 409         if (!fops)
 410                 goto out_cdev_put;
 411 
 412         replace_fops(filp, fops);
 413         if (filp->f_op->open) {
 414                 ret = filp->f_op->open(inode, filp);
 415                 if (ret)
 416                         goto out_cdev_put;
 417         }
 418 
 419         return 0;
 420 
 421  out_cdev_put:
 422         cdev_put(p);
 423         return ret;
 424 }
 425 
 426 void cd_forget(struct inode *inode)
 427 {
 428         spin_lock(&cdev_lock);
 429         list_del_init(&inode->i_devices);
 430         inode->i_cdev = NULL;
 431         inode->i_mapping = &inode->i_data;
 432         spin_unlock(&cdev_lock);
 433 }
 434 
 435 static void cdev_purge(struct cdev *cdev)
 436 {
 437         spin_lock(&cdev_lock);
 438         while (!list_empty(&cdev->list)) {
 439                 struct inode *inode;
 440                 inode = container_of(cdev->list.next, struct inode, i_devices);
 441                 list_del_init(&inode->i_devices);
 442                 inode->i_cdev = NULL;
 443         }
 444         spin_unlock(&cdev_lock);
 445 }
 446 
 447 /*
 448  * Dummy default file operations: the only thing this does
 449  * is contain the open that then fills in the correct operations
 450  * depending on the special file...
 451  */
 452 const struct file_operations def_chr_fops = {
 453         .open = chrdev_open,
 454         .llseek = noop_llseek,
 455 };
 456 
 457 static struct kobject *exact_match(dev_t dev, int *part, void *data)
 458 {
 459         struct cdev *p = data;
 460         return &p->kobj;
 461 }
 462 
 463 static int exact_lock(dev_t dev, void *data)
 464 {
 465         struct cdev *p = data;
 466         return cdev_get(p) ? 0 : -1;
 467 }
 468 
 469 /**
 470  * cdev_add() - add a char device to the system
 471  * @p: the cdev structure for the device
 472  * @dev: the first device number for which this device is responsible
 473  * @count: the number of consecutive minor numbers corresponding to this
 474  *         device
 475  *
 476  * cdev_add() adds the device represented by @p to the system, making it
 477  * live immediately.  A negative error code is returned on failure.
 478  */
 479 int cdev_add(struct cdev *p, dev_t dev, unsigned count)
 480 {
 481         int error;
 482 
 483         p->dev = dev;
 484         p->count = count;
 485 
 486         error = kobj_map(cdev_map, dev, count, NULL,
 487                          exact_match, exact_lock, p);
 488         if (error)
 489                 return error;
 490 
 491         kobject_get(p->kobj.parent);
 492 
 493         return 0;
 494 }
 495 
 496 /**
 497  * cdev_set_parent() - set the parent kobject for a char device
 498  * @p: the cdev structure
 499  * @kobj: the kobject to take a reference to
 500  *
 501  * cdev_set_parent() sets a parent kobject which will be referenced
 502  * appropriately so the parent is not freed before the cdev.  This
 503  * should be called before cdev_add().
 504  */
 505 void cdev_set_parent(struct cdev *p, struct kobject *kobj)
 506 {
 507         WARN_ON(!kobj->state_initialized);
 508         p->kobj.parent = kobj;
 509 }
 510 
 511 /**
 512  * cdev_device_add() - add a char device and its corresponding
 513  *      struct device, linked together
 514  * @cdev: the cdev structure
 515  * @dev: the device structure
 516  *
 517  * cdev_device_add() adds the char device represented by @cdev to the
 518  * system, just as cdev_add() does.  It then adds @dev to the system
 519  * using device_add().  The dev_t for the char device is taken from the
 520  * struct device, which needs to be initialized first.  This helper
 521  * takes a reference to the parent device so the parent is not released
 522  * until all references to the cdev are released.
 523  *
 524  * This helper uses dev->devt for the device number.  If it is not set,
 525  * the cdev is not added and the call is equivalent to device_add().
 526  *
 527  * This function should be used whenever the struct cdev and the
 528  * struct device are members of the same structure whose lifetime is
 529  * managed by the struct device.
 530  *
 531  * NOTE: callers must assume that userspace was able to open the cdev
 532  * and can call its fops callbacks at any time, even if this function fails.
 533  */
 534 int cdev_device_add(struct cdev *cdev, struct device *dev)
 535 {
 536         int rc = 0;
 537 
 538         if (dev->devt) {
 539                 cdev_set_parent(cdev, &dev->kobj);
 540 
 541                 rc = cdev_add(cdev, dev->devt, 1);
 542                 if (rc)
 543                         return rc;
 544         }
 545 
 546         rc = device_add(dev);
 547         if (rc)
 548                 cdev_del(cdev);
 549 
 550         return rc;
 551 }
 552 
 553 /**
 554  * cdev_device_del() - inverse of cdev_device_add
 555  * @cdev: the cdev structure
 556  * @dev: the device structure
 557  *
 558  * cdev_device_del() is a helper function to call cdev_del() and
 559  * device_del().  It should be used whenever cdev_device_add() is used.
 560  *
 561  * If dev->devt is not set it will not remove the cdev and will be
 562  * equivalent to device_del().
 563  *
 564  * NOTE: this guarantees that associated sysfs callbacks are not running
 565  * or runnable; however, any cdevs already open will remain and their
 566  * fops will still be callable even after this function returns.
 567  */
 568 void cdev_device_del(struct cdev *cdev, struct device *dev)
 569 {
 570         device_del(dev);
 571         if (dev->devt)
 572                 cdev_del(cdev);
 573 }
 574 
 575 static void cdev_unmap(dev_t dev, unsigned count)
 576 {
 577         kobj_unmap(cdev_map, dev, count);
 578 }
 579 
 580 /**
 581  * cdev_del() - remove a cdev from the system
 582  * @p: the cdev structure to be removed
 583  *
 584  * cdev_del() removes @p from the system, possibly freeing the structure
 585  * itself.
 586  *
 587  * NOTE: this guarantees that the cdev device can no longer be opened;
 588  * however, any cdevs already open will remain and their fops will still
 589  * be callable even after cdev_del() returns.
 590  */
 591 void cdev_del(struct cdev *p)
 592 {
 593         cdev_unmap(p->dev, p->count);
 594         kobject_put(&p->kobj);
 595 }
 596 
 597 
 598 static void cdev_default_release(struct kobject *kobj)
 599 {
 600         struct cdev *p = container_of(kobj, struct cdev, kobj);
 601         struct kobject *parent = kobj->parent;
 602 
 603         cdev_purge(p);
 604         kobject_put(parent);
 605 }
 606 
 607 static void cdev_dynamic_release(struct kobject *kobj)
 608 {
 609         struct cdev *p = container_of(kobj, struct cdev, kobj);
 610         struct kobject *parent = kobj->parent;
 611 
 612         cdev_purge(p);
 613         kfree(p);
 614         kobject_put(parent);
 615 }
 616 
 617 static struct kobj_type ktype_cdev_default = {
 618         .release        = cdev_default_release,
 619 };
 620 
 621 static struct kobj_type ktype_cdev_dynamic = {
 622         .release        = cdev_dynamic_release,
 623 };
 624 
 625 /**
 626  * cdev_alloc() - allocate a cdev structure
 627  *
 628  * Allocates and returns a cdev structure, or NULL on failure.
 629  */
 630 struct cdev *cdev_alloc(void)
 631 {
 632         struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
 633         if (p) {
 634                 INIT_LIST_HEAD(&p->list);
 635                 kobject_init(&p->kobj, &ktype_cdev_dynamic);
 636         }
 637         return p;
 638 }
 639 
 640 /**
 641  * cdev_init() - initialize a cdev structure
 642  * @cdev: the structure to initialize
 643  * @fops: the file_operations for this device
 644  *
 645  * Initializes @cdev, remembering @fops, making it ready to add to the
 646  * system with cdev_add().
 647  */
 648 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
 649 {
 650         memset(cdev, 0, sizeof *cdev);
 651         INIT_LIST_HEAD(&cdev->list);
 652         kobject_init(&cdev->kobj, &ktype_cdev_default);
 653         cdev->ops = fops;
 654 }
 655 
 656 static struct kobject *base_probe(dev_t dev, int *part, void *data)
 657 {
 658         if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
 659                 /* Make old-style 2.4 aliases work */
 660                 request_module("char-major-%d", MAJOR(dev));
 661         return NULL;
 662 }
 663 
 664 void __init chrdev_init(void)
 665 {
 666         cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
 667 }
 668 
 669 
 670 /* Let modules do char dev stuff */
 671 EXPORT_SYMBOL(register_chrdev_region);
 672 EXPORT_SYMBOL(unregister_chrdev_region);
 673 EXPORT_SYMBOL(alloc_chrdev_region);
 674 EXPORT_SYMBOL(cdev_init);
 675 EXPORT_SYMBOL(cdev_alloc);
 676 EXPORT_SYMBOL(cdev_del);
 677 EXPORT_SYMBOL(cdev_add);
 678 EXPORT_SYMBOL(cdev_set_parent);
 679 EXPORT_SYMBOL(cdev_device_add);
 680 EXPORT_SYMBOL(cdev_device_del);
 681 EXPORT_SYMBOL(__register_chrdev);
 682 EXPORT_SYMBOL(__unregister_chrdev);