Lines matching refs:ubi — cross-reference hits, one per line: source line number, the matching code, and the enclosing function ("argument"/"local" mark lines where ubi is declared). The hits apparently come from the UBI core attach/detach code (drivers/mtd/ubi/build.c).
156 int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) in ubi_volume_notify() argument
161 ubi_do_get_device_info(ubi, &nt.di); in ubi_volume_notify()
162 ubi_do_get_volume_info(ubi, vol, &nt.vi); in ubi_volume_notify()
169 ret = ubi_update_fastmap(ubi); in ubi_volume_notify()
171 ubi_msg(ubi, "Unable to write a new fastmap: %i", ret); in ubi_volume_notify()
188 int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb) in ubi_notify_all() argument
193 ubi_do_get_device_info(ubi, &nt.di); in ubi_notify_all()
195 mutex_lock(&ubi->device_mutex); in ubi_notify_all()
196 for (i = 0; i < ubi->vtbl_slots; i++) { in ubi_notify_all()
202 if (!ubi->volumes[i]) in ubi_notify_all()
205 ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi); in ubi_notify_all()
213 mutex_unlock(&ubi->device_mutex); in ubi_notify_all()
236 struct ubi_device *ubi = ubi_devices[i]; in ubi_enumerate_volumes() local
238 if (!ubi) in ubi_enumerate_volumes()
240 count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb); in ubi_enumerate_volumes()
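
The three helpers above feed UBI's blocking notifier chain: ubi_volume_notify() packs device and volume info into a struct ubi_notification (and, for fastmap-relevant events, rewrites the fastmap first), while ubi_notify_all() and ubi_enumerate_volumes() replay UBI_VOLUME_ADDED for already-existing volumes to a single notifier_block. A minimal client sketch, assuming the ubi_register_volume_notifier()/ubi_unregister_volume_notifier() helpers and struct ubi_notification from <linux/mtd/ubi.h>; the module-local names (my_ubi_nb, my_ubi_notify) are illustrative:

	/* Sketch only: a notifier client that logs UBI volume add/remove
	 * events.  Assumes ubi_register_volume_notifier() /
	 * ubi_unregister_volume_notifier() and struct ubi_notification
	 * from <linux/mtd/ubi.h>; module names are illustrative. */
	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/mtd/ubi.h>

	static int my_ubi_notify(struct notifier_block *nb,
				 unsigned long ntype, void *ns_ptr)
	{
		struct ubi_notification *nt = ns_ptr;

		switch (ntype) {
		case UBI_VOLUME_ADDED:
			pr_info("ubi%d volume %d (\"%s\") added\n",
				nt->di.ubi_num, nt->vi.vol_id, nt->vi.name);
			break;
		case UBI_VOLUME_REMOVED:
			pr_info("ubi%d volume %d removed\n",
				nt->di.ubi_num, nt->vi.vol_id);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_ubi_nb = {
		.notifier_call = my_ubi_notify,
	};

	static int __init my_init(void)
	{
		/* ignore_existing = 0: ubi_enumerate_volumes() replays
		 * UBI_VOLUME_ADDED for volumes that already exist. */
		return ubi_register_volume_notifier(&my_ubi_nb, 0);
	}

	static void __exit my_exit(void)
	{
		ubi_unregister_volume_notifier(&my_ubi_nb);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");
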
257 struct ubi_device *ubi; in ubi_get_device() local
260 ubi = ubi_devices[ubi_num]; in ubi_get_device()
261 if (ubi) { in ubi_get_device()
262 ubi_assert(ubi->ref_count >= 0); in ubi_get_device()
263 ubi->ref_count += 1; in ubi_get_device()
264 get_device(&ubi->dev); in ubi_get_device()
268 return ubi; in ubi_get_device()
275 void ubi_put_device(struct ubi_device *ubi) in ubi_put_device() argument
278 ubi->ref_count -= 1; in ubi_put_device()
279 put_device(&ubi->dev); in ubi_put_device()
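
ubi_get_device() increments both the UBI-internal ref_count and the embedded struct device reference, and ubi_put_device() drops both, so every successful lookup must be balanced by exactly one put. A minimal sketch of that discipline, assuming only the two helpers shown above (they are UBI-internal, so this fragment only makes sense inside drivers/mtd/ubi where "ubi.h" is available); the caller name is illustrative:

	/* Sketch of the get/put pairing these helpers enforce. */
	#include <linux/errno.h>

	static int work_on_ubi(int ubi_num)
	{
		struct ubi_device *ubi;

		ubi = ubi_get_device(ubi_num);	/* ref_count++, get_device() */
		if (!ubi)
			return -ENODEV;		/* no such device, nothing held */

		/* ... safe to dereference ubi here: it cannot go away ... */

		ubi_put_device(ubi);		/* ref_count--, put_device() */
		return 0;
	}
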
293 struct ubi_device *ubi; in ubi_get_by_major() local
297 ubi = ubi_devices[i]; in ubi_get_by_major()
298 if (ubi && MAJOR(ubi->cdev.dev) == major) { in ubi_get_by_major()
299 ubi_assert(ubi->ref_count >= 0); in ubi_get_by_major()
300 ubi->ref_count += 1; in ubi_get_by_major()
301 get_device(&ubi->dev); in ubi_get_by_major()
303 return ubi; in ubi_get_by_major()
325 struct ubi_device *ubi = ubi_devices[i]; in ubi_major2num() local
327 if (ubi && MAJOR(ubi->cdev.dev) == major) { in ubi_major2num()
328 ubi_num = ubi->ubi_num; in ubi_major2num()
342 struct ubi_device *ubi; in dev_attribute_show() local
354 ubi = container_of(dev, struct ubi_device, dev); in dev_attribute_show()
355 ubi = ubi_get_device(ubi->ubi_num); in dev_attribute_show()
356 if (!ubi) in dev_attribute_show()
360 ret = sprintf(buf, "%d\n", ubi->leb_size); in dev_attribute_show()
362 ret = sprintf(buf, "%d\n", ubi->avail_pebs); in dev_attribute_show()
364 ret = sprintf(buf, "%d\n", ubi->good_peb_count); in dev_attribute_show()
366 ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); in dev_attribute_show()
368 ret = sprintf(buf, "%d\n", ubi->max_ec); in dev_attribute_show()
370 ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); in dev_attribute_show()
372 ret = sprintf(buf, "%d\n", ubi->bad_peb_count); in dev_attribute_show()
374 ret = sprintf(buf, "%d\n", ubi->vtbl_slots); in dev_attribute_show()
376 ret = sprintf(buf, "%d\n", ubi->min_io_size); in dev_attribute_show()
378 ret = sprintf(buf, "%d\n", ubi->thread_enabled); in dev_attribute_show()
380 ret = sprintf(buf, "%d\n", ubi->mtd->index); in dev_attribute_show()
384 ubi_put_device(ubi); in dev_attribute_show()
390 struct ubi_device *ubi = container_of(dev, struct ubi_device, dev); in dev_release() local
392 kfree(ubi); in dev_release()
404 static int ubi_sysfs_init(struct ubi_device *ubi, int *ref) in ubi_sysfs_init() argument
408 ubi->dev.release = dev_release; in ubi_sysfs_init()
409 ubi->dev.devt = ubi->cdev.dev; in ubi_sysfs_init()
410 ubi->dev.class = ubi_class; in ubi_sysfs_init()
411 dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num); in ubi_sysfs_init()
412 err = device_register(&ubi->dev); in ubi_sysfs_init()
417 err = device_create_file(&ubi->dev, &dev_eraseblock_size); in ubi_sysfs_init()
420 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); in ubi_sysfs_init()
423 err = device_create_file(&ubi->dev, &dev_total_eraseblocks); in ubi_sysfs_init()
426 err = device_create_file(&ubi->dev, &dev_volumes_count); in ubi_sysfs_init()
429 err = device_create_file(&ubi->dev, &dev_max_ec); in ubi_sysfs_init()
432 err = device_create_file(&ubi->dev, &dev_reserved_for_bad); in ubi_sysfs_init()
435 err = device_create_file(&ubi->dev, &dev_bad_peb_count); in ubi_sysfs_init()
438 err = device_create_file(&ubi->dev, &dev_max_vol_count); in ubi_sysfs_init()
441 err = device_create_file(&ubi->dev, &dev_min_io_size); in ubi_sysfs_init()
444 err = device_create_file(&ubi->dev, &dev_bgt_enabled); in ubi_sysfs_init()
447 err = device_create_file(&ubi->dev, &dev_mtd_num); in ubi_sysfs_init()
455 static void ubi_sysfs_close(struct ubi_device *ubi) in ubi_sysfs_close() argument
457 device_remove_file(&ubi->dev, &dev_mtd_num); in ubi_sysfs_close()
458 device_remove_file(&ubi->dev, &dev_bgt_enabled); in ubi_sysfs_close()
459 device_remove_file(&ubi->dev, &dev_min_io_size); in ubi_sysfs_close()
460 device_remove_file(&ubi->dev, &dev_max_vol_count); in ubi_sysfs_close()
461 device_remove_file(&ubi->dev, &dev_bad_peb_count); in ubi_sysfs_close()
462 device_remove_file(&ubi->dev, &dev_reserved_for_bad); in ubi_sysfs_close()
463 device_remove_file(&ubi->dev, &dev_max_ec); in ubi_sysfs_close()
464 device_remove_file(&ubi->dev, &dev_volumes_count); in ubi_sysfs_close()
465 device_remove_file(&ubi->dev, &dev_total_eraseblocks); in ubi_sysfs_close()
466 device_remove_file(&ubi->dev, &dev_avail_eraseblocks); in ubi_sysfs_close()
467 device_remove_file(&ubi->dev, &dev_eraseblock_size); in ubi_sysfs_close()
468 device_unregister(&ubi->dev); in ubi_sysfs_close()
475 static void kill_volumes(struct ubi_device *ubi) in kill_volumes() argument
479 for (i = 0; i < ubi->vtbl_slots; i++) in kill_volumes()
480 if (ubi->volumes[i]) in kill_volumes()
481 ubi_free_volume(ubi, ubi->volumes[i]); in kill_volumes()
502 static int uif_init(struct ubi_device *ubi, int *ref) in uif_init() argument
508 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); in uif_init()
518 err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); in uif_init()
520 ubi_err(ubi, "cannot register UBI character devices"); in uif_init()
525 cdev_init(&ubi->cdev, &ubi_cdev_operations); in uif_init()
526 dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev)); in uif_init()
527 ubi->cdev.owner = THIS_MODULE; in uif_init()
529 err = cdev_add(&ubi->cdev, dev, 1); in uif_init()
531 ubi_err(ubi, "cannot add character device"); in uif_init()
535 err = ubi_sysfs_init(ubi, ref); in uif_init()
539 for (i = 0; i < ubi->vtbl_slots; i++) in uif_init()
540 if (ubi->volumes[i]) { in uif_init()
541 err = ubi_add_volume(ubi, ubi->volumes[i]); in uif_init()
543 ubi_err(ubi, "cannot add volume %d", i); in uif_init()
551 kill_volumes(ubi); in uif_init()
554 get_device(&ubi->dev); in uif_init()
555 ubi_sysfs_close(ubi); in uif_init()
556 cdev_del(&ubi->cdev); in uif_init()
558 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); in uif_init()
559 ubi_err(ubi, "cannot initialize UBI %s, error %d", in uif_init()
560 ubi->ubi_name, err); in uif_init()
572 static void uif_close(struct ubi_device *ubi) in uif_close() argument
574 kill_volumes(ubi); in uif_close()
575 ubi_sysfs_close(ubi); in uif_close()
576 cdev_del(&ubi->cdev); in uif_close()
577 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); in uif_close()
584 void ubi_free_internal_volumes(struct ubi_device *ubi) in ubi_free_internal_volumes() argument
588 for (i = ubi->vtbl_slots; in ubi_free_internal_volumes()
589 i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { in ubi_free_internal_volumes()
590 kfree(ubi->volumes[i]->eba_tbl); in ubi_free_internal_volumes()
591 kfree(ubi->volumes[i]); in ubi_free_internal_volumes()
595 static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024) in get_bad_peb_limit() argument
612 device_size = mtd_get_device_size(ubi->mtd); in get_bad_peb_limit()
613 device_pebs = mtd_div_by_eb(device_size, ubi->mtd); in get_bad_peb_limit()
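
get_bad_peb_limit() scales the permitted number of bad PEBs by the size of the whole flash chip (mtd_get_device_size()) rather than the attached partition, since bad blocks are not evenly distributed and could all fall inside the partition being attached. A standalone worked example of the limit arithmetic, assuming the usual formula limit = device_pebs * max_beb_per1024 / 1024, rounded up; the geometry numbers are illustrative:

	/* Worked example (userspace, illustrative numbers): the bad-PEB
	 * limit as a per-1024 fraction of the whole chip, rounded up.
	 * This mirrors the formula, not the kernel code itself. */
	#include <stdio.h>

	static int bad_peb_limit(long long device_size, int peb_size,
				 int max_beb_per1024)
	{
		long long device_pebs, limit;

		if (max_beb_per1024 <= 0)
			return 0;		/* bad-block handling disabled */

		device_pebs = device_size / peb_size;
		limit = device_pebs * max_beb_per1024 / 1024;
		if (limit * 1024 / max_beb_per1024 < device_pebs)
			limit += 1;		/* round up */
		return (int)limit;
	}

	int main(void)
	{
		/* 256 MiB chip, 128 KiB PEBs -> 2048 PEBs; default 20/1024 */
		printf("%d\n", bad_peb_limit(256LL << 20, 128 << 10, 20));
		/* prints 40: up to 40 PEBs reserved for bad-PEB handling */
		return 0;
	}
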
639 static int io_init(struct ubi_device *ubi, int max_beb_per1024) in io_init() argument
644 if (ubi->mtd->numeraseregions != 0) { in io_init()
654 ubi_err(ubi, "multiple regions, not implemented"); in io_init()
658 if (ubi->vid_hdr_offset < 0) in io_init()
666 ubi->peb_size = ubi->mtd->erasesize; in io_init()
667 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); in io_init()
668 ubi->flash_size = ubi->mtd->size; in io_init()
670 if (mtd_can_have_bb(ubi->mtd)) { in io_init()
671 ubi->bad_allowed = 1; in io_init()
672 ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024); in io_init()
675 if (ubi->mtd->type == MTD_NORFLASH) { in io_init()
676 ubi_assert(ubi->mtd->writesize == 1); in io_init()
677 ubi->nor_flash = 1; in io_init()
680 ubi->min_io_size = ubi->mtd->writesize; in io_init()
681 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; in io_init()
688 if (!is_power_of_2(ubi->min_io_size)) { in io_init()
689 ubi_err(ubi, "min. I/O unit (%d) is not power of 2", in io_init()
690 ubi->min_io_size); in io_init()
694 ubi_assert(ubi->hdrs_min_io_size > 0); in io_init()
695 ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); in io_init()
696 ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); in io_init()
698 ubi->max_write_size = ubi->mtd->writebufsize; in io_init()
703 if (ubi->max_write_size < ubi->min_io_size || in io_init()
704 ubi->max_write_size % ubi->min_io_size || in io_init()
705 !is_power_of_2(ubi->max_write_size)) { in io_init()
706 ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit", in io_init()
707 ubi->max_write_size, ubi->min_io_size); in io_init()
712 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); in io_init()
713 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); in io_init()
715 dbg_gen("min_io_size %d", ubi->min_io_size); in io_init()
716 dbg_gen("max_write_size %d", ubi->max_write_size); in io_init()
717 dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size); in io_init()
718 dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize); in io_init()
719 dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize); in io_init()
721 if (ubi->vid_hdr_offset == 0) in io_init()
723 ubi->vid_hdr_offset = ubi->vid_hdr_aloffset = in io_init()
724 ubi->ec_hdr_alsize; in io_init()
726 ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & in io_init()
727 ~(ubi->hdrs_min_io_size - 1); in io_init()
728 ubi->vid_hdr_shift = ubi->vid_hdr_offset - in io_init()
729 ubi->vid_hdr_aloffset; in io_init()
733 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; in io_init()
734 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); in io_init()
736 dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset); in io_init()
737 dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); in io_init()
738 dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift); in io_init()
739 dbg_gen("leb_start %d", ubi->leb_start); in io_init()
742 if (ubi->vid_hdr_shift % 4) { in io_init()
743 ubi_err(ubi, "unaligned VID header shift %d", in io_init()
744 ubi->vid_hdr_shift); in io_init()
749 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || in io_init()
750 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || in io_init()
751 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || in io_init()
752 ubi->leb_start & (ubi->min_io_size - 1)) { in io_init()
753 ubi_err(ubi, "bad VID header (%d) or data offsets (%d)", in io_init()
754 ubi->vid_hdr_offset, ubi->leb_start); in io_init()
762 ubi->max_erroneous = ubi->peb_count / 10; in io_init()
763 if (ubi->max_erroneous < 16) in io_init()
764 ubi->max_erroneous = 16; in io_init()
765 dbg_gen("max_erroneous %d", ubi->max_erroneous); in io_init()
772 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { in io_init()
773 ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode"); in io_init()
774 ubi->ro_mode = 1; in io_init()
777 ubi->leb_size = ubi->peb_size - ubi->leb_start; in io_init()
779 if (!(ubi->mtd->flags & MTD_WRITEABLE)) { in io_init()
780 ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode", in io_init()
781 ubi->mtd->index); in io_init()
782 ubi->ro_mode = 1; in io_init()
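
The io_init() lines above derive the on-flash layout: the EC header is padded up to hdrs_min_io_size, the VID header by default starts right after that padded EC header, vid_hdr_aloffset/vid_hdr_shift split the VID offset into an aligned part and a remainder, and LEB data starts at the next min_io_size boundary. A standalone sketch of that arithmetic for a common NAND geometry (128 KiB PEB, 2048-byte page, 512-byte sub-page), assuming UBI_EC_HDR_SIZE and UBI_VID_HDR_SIZE are 64 bytes each; the numbers are illustrative:

	/* Worked example of the io_init() geometry math (userspace sketch). */
	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		int peb_size = 128 * 1024, min_io = 2048, hdrs_min_io = 512;
		int ec_hdr_size = 64, vid_hdr_size = 64;

		int ec_hdr_alsize   = ALIGN_UP(ec_hdr_size, hdrs_min_io);	/* 512 */
		int vid_hdr_offset  = ec_hdr_alsize;				/* default */
		int vid_hdr_aloffset = vid_hdr_offset & ~(hdrs_min_io - 1);	/* 512 */
		int vid_hdr_shift   = vid_hdr_offset - vid_hdr_aloffset;	/* 0 */
		int leb_start = ALIGN_UP(vid_hdr_offset + vid_hdr_size, min_io); /* 2048 */
		int leb_size  = peb_size - leb_start;				/* 129024 */

		printf("ec_hdr_alsize=%d vid_hdr_offset=%d aloffset=%d shift=%d\n",
		       ec_hdr_alsize, vid_hdr_offset, vid_hdr_aloffset, vid_hdr_shift);
		printf("leb_start=%d leb_size=%d (%d KiB usable per 128 KiB PEB)\n",
		       leb_start, leb_size, leb_size / 1024);
		return 0;
	}
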
806 static int autoresize(struct ubi_device *ubi, int vol_id) in autoresize() argument
809 struct ubi_volume *vol = ubi->volumes[vol_id]; in autoresize()
812 if (ubi->ro_mode) { in autoresize()
813 ubi_warn(ubi, "skip auto-resize because of R/O mode"); in autoresize()
822 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; in autoresize()
824 if (ubi->avail_pebs == 0) { in autoresize()
831 vtbl_rec = ubi->vtbl[vol_id]; in autoresize()
832 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); in autoresize()
834 ubi_err(ubi, "cannot clean auto-resize flag for volume %d", in autoresize()
839 old_reserved_pebs + ubi->avail_pebs); in autoresize()
841 ubi_err(ubi, "cannot auto-resize volume %d", in autoresize()
848 ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs", in autoresize()
872 struct ubi_device *ubi; in ubi_attach_mtd_dev() local
888 ubi = ubi_devices[i]; in ubi_attach_mtd_dev()
889 if (ubi && mtd->index == ubi->mtd->index) { in ubi_attach_mtd_dev()
890 ubi_err(ubi, "mtd%d is already attached to ubi%d", in ubi_attach_mtd_dev()
905 ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI", in ubi_attach_mtd_dev()
916 ubi_err(ubi, "only %d UBI devices may be created", in ubi_attach_mtd_dev()
926 ubi_err(ubi, "already exists"); in ubi_attach_mtd_dev()
931 ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); in ubi_attach_mtd_dev()
932 if (!ubi) in ubi_attach_mtd_dev()
935 ubi->mtd = mtd; in ubi_attach_mtd_dev()
936 ubi->ubi_num = ubi_num; in ubi_attach_mtd_dev()
937 ubi->vid_hdr_offset = vid_hdr_offset; in ubi_attach_mtd_dev()
938 ubi->autoresize_vol_id = -1; in ubi_attach_mtd_dev()
941 ubi->fm_pool.used = ubi->fm_pool.size = 0; in ubi_attach_mtd_dev()
942 ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0; in ubi_attach_mtd_dev()
948 ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size, in ubi_attach_mtd_dev()
949 ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE); in ubi_attach_mtd_dev()
950 if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE) in ubi_attach_mtd_dev()
951 ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE; in ubi_attach_mtd_dev()
953 ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2; in ubi_attach_mtd_dev()
954 ubi->fm_disabled = !fm_autoconvert; in ubi_attach_mtd_dev()
956 ubi_enable_dbg_chk_fastmap(ubi); in ubi_attach_mtd_dev()
958 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) in ubi_attach_mtd_dev()
960 ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.", in ubi_attach_mtd_dev()
962 ubi->fm_disabled = 1; in ubi_attach_mtd_dev()
965 ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size); in ubi_attach_mtd_dev()
966 ubi_msg(ubi, "default fastmap WL pool size: %d", in ubi_attach_mtd_dev()
967 ubi->fm_wl_pool.max_size); in ubi_attach_mtd_dev()
969 ubi->fm_disabled = 1; in ubi_attach_mtd_dev()
971 mutex_init(&ubi->buf_mutex); in ubi_attach_mtd_dev()
972 mutex_init(&ubi->ckvol_mutex); in ubi_attach_mtd_dev()
973 mutex_init(&ubi->device_mutex); in ubi_attach_mtd_dev()
974 spin_lock_init(&ubi->volumes_lock); in ubi_attach_mtd_dev()
975 init_rwsem(&ubi->fm_protect); in ubi_attach_mtd_dev()
976 init_rwsem(&ubi->fm_eba_sem); in ubi_attach_mtd_dev()
978 ubi_msg(ubi, "attaching mtd%d", mtd->index); in ubi_attach_mtd_dev()
980 err = io_init(ubi, max_beb_per1024); in ubi_attach_mtd_dev()
985 ubi->peb_buf = vmalloc(ubi->peb_size); in ubi_attach_mtd_dev()
986 if (!ubi->peb_buf) in ubi_attach_mtd_dev()
990 ubi->fm_size = ubi_calc_fm_size(ubi); in ubi_attach_mtd_dev()
991 ubi->fm_buf = vzalloc(ubi->fm_size); in ubi_attach_mtd_dev()
992 if (!ubi->fm_buf) in ubi_attach_mtd_dev()
995 err = ubi_attach(ubi, 0); in ubi_attach_mtd_dev()
997 ubi_err(ubi, "failed to attach mtd%d, error %d", in ubi_attach_mtd_dev()
1002 if (ubi->autoresize_vol_id != -1) { in ubi_attach_mtd_dev()
1003 err = autoresize(ubi, ubi->autoresize_vol_id); in ubi_attach_mtd_dev()
1008 err = uif_init(ubi, &ref); in ubi_attach_mtd_dev()
1012 err = ubi_debugfs_init_dev(ubi); in ubi_attach_mtd_dev()
1016 ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name); in ubi_attach_mtd_dev()
1017 if (IS_ERR(ubi->bgt_thread)) { in ubi_attach_mtd_dev()
1018 err = PTR_ERR(ubi->bgt_thread); in ubi_attach_mtd_dev()
1019 ubi_err(ubi, "cannot spawn \"%s\", error %d", in ubi_attach_mtd_dev()
1020 ubi->bgt_name, err); in ubi_attach_mtd_dev()
1024 ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)", in ubi_attach_mtd_dev()
1025 mtd->index, mtd->name, ubi->flash_size >> 20); in ubi_attach_mtd_dev()
1026 ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes", in ubi_attach_mtd_dev()
1027 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size); in ubi_attach_mtd_dev()
1028 ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d", in ubi_attach_mtd_dev()
1029 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size); in ubi_attach_mtd_dev()
1030 ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d", in ubi_attach_mtd_dev()
1031 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start); in ubi_attach_mtd_dev()
1032 ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d", in ubi_attach_mtd_dev()
1033 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count); in ubi_attach_mtd_dev()
1034 ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d", in ubi_attach_mtd_dev()
1035 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT, in ubi_attach_mtd_dev()
1036 ubi->vtbl_slots); in ubi_attach_mtd_dev()
1037 ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u", in ubi_attach_mtd_dev()
1038 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD, in ubi_attach_mtd_dev()
1039 ubi->image_seq); in ubi_attach_mtd_dev()
1040 ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d", in ubi_attach_mtd_dev()
1041 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs); in ubi_attach_mtd_dev()
1047 spin_lock(&ubi->wl_lock); in ubi_attach_mtd_dev()
1048 ubi->thread_enabled = 1; in ubi_attach_mtd_dev()
1049 wake_up_process(ubi->bgt_thread); in ubi_attach_mtd_dev()
1050 spin_unlock(&ubi->wl_lock); in ubi_attach_mtd_dev()
1052 ubi_devices[ubi_num] = ubi; in ubi_attach_mtd_dev()
1053 ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); in ubi_attach_mtd_dev()
1057 ubi_debugfs_exit_dev(ubi); in ubi_attach_mtd_dev()
1059 get_device(&ubi->dev); in ubi_attach_mtd_dev()
1061 uif_close(ubi); in ubi_attach_mtd_dev()
1063 ubi_wl_close(ubi); in ubi_attach_mtd_dev()
1064 ubi_free_internal_volumes(ubi); in ubi_attach_mtd_dev()
1065 vfree(ubi->vtbl); in ubi_attach_mtd_dev()
1067 vfree(ubi->peb_buf); in ubi_attach_mtd_dev()
1068 vfree(ubi->fm_buf); in ubi_attach_mtd_dev()
1070 put_device(&ubi->dev); in ubi_attach_mtd_dev()
1072 kfree(ubi); in ubi_attach_mtd_dev()
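
Inside the attach path above, the fastmap pool sizes (lines 948-953) are taken as roughly 5% of the device's PEBs, clamped to [UBI_FM_MIN_POOL_SIZE, UBI_FM_MAX_POOL_SIZE], with the wear-levelling pool at half the main pool. A standalone sketch of that computation, assuming the clamp constants are 8 and 256 as in drivers/mtd/ubi/ubi.h; the geometry is illustrative:

	/* Worked example of the fastmap pool sizing (userspace sketch).
	 * Assumed constants: UBI_FM_MIN_POOL_SIZE = 8, UBI_FM_MAX_POOL_SIZE = 256. */
	#include <stdio.h>

	#define FM_MIN_POOL	8
	#define FM_MAX_POOL	256

	int main(void)
	{
		long long mtd_size = 256LL << 20;	/* 256 MiB partition */
		int peb_size = 128 << 10;		/* 128 KiB PEB */
		int pebs = (int)(mtd_size / peb_size);	/* 2048 */
		int pool, wl_pool;

		pool = (pebs / 100) * 5;		/* ~5% of PEBs: 100 */
		if (pool > FM_MAX_POOL)
			pool = FM_MAX_POOL;
		if (pool < FM_MIN_POOL)
			pool = FM_MIN_POOL;

		wl_pool = pool / 2;

		printf("fm_pool.max_size=%d fm_wl_pool.max_size=%d\n", pool, wl_pool);
		/* prints 100 and 50 for this geometry */
		return 0;
	}
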
1091 struct ubi_device *ubi; in ubi_detach_mtd_dev() local
1096 ubi = ubi_get_device(ubi_num); in ubi_detach_mtd_dev()
1097 if (!ubi) in ubi_detach_mtd_dev()
1101 put_device(&ubi->dev); in ubi_detach_mtd_dev()
1102 ubi->ref_count -= 1; in ubi_detach_mtd_dev()
1103 if (ubi->ref_count) { in ubi_detach_mtd_dev()
1109 ubi_err(ubi, "%s reference count %d, destroy anyway", in ubi_detach_mtd_dev()
1110 ubi->ubi_name, ubi->ref_count); in ubi_detach_mtd_dev()
1115 ubi_assert(ubi_num == ubi->ubi_num); in ubi_detach_mtd_dev()
1116 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); in ubi_detach_mtd_dev()
1117 ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index); in ubi_detach_mtd_dev()
1123 if (!ubi_dbg_chk_fastmap(ubi)) in ubi_detach_mtd_dev()
1124 ubi_update_fastmap(ubi); in ubi_detach_mtd_dev()
1130 if (ubi->bgt_thread) in ubi_detach_mtd_dev()
1131 kthread_stop(ubi->bgt_thread); in ubi_detach_mtd_dev()
1137 get_device(&ubi->dev); in ubi_detach_mtd_dev()
1139 ubi_debugfs_exit_dev(ubi); in ubi_detach_mtd_dev()
1140 uif_close(ubi); in ubi_detach_mtd_dev()
1142 ubi_wl_close(ubi); in ubi_detach_mtd_dev()
1143 ubi_free_internal_volumes(ubi); in ubi_detach_mtd_dev()
1144 vfree(ubi->vtbl); in ubi_detach_mtd_dev()
1145 put_mtd_device(ubi->mtd); in ubi_detach_mtd_dev()
1146 vfree(ubi->peb_buf); in ubi_detach_mtd_dev()
1147 vfree(ubi->fm_buf); in ubi_detach_mtd_dev()
1148 ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index); in ubi_detach_mtd_dev()
1149 put_device(&ubi->dev); in ubi_detach_mtd_dev()
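
Detach mirrors attach in reverse: notify listeners, write a fresh fastmap, stop the background thread, then tear down debugfs, the user interfaces, wear-levelling and the buffers before the final put_device(). A sketch of how the two entry points are driven from inside the UBI core (for example by the ctrl-device ioctls); it assumes the signatures shown here, that ubi_devices_mutex must be held around both calls, and that UBI_DEV_NUM_AUTO, a vid_hdr_offset of 0 and a max_beb_per1024 of 0 select the defaults. This is kernel-internal code and only compiles as part of drivers/mtd/ubi:

	/* Sketch only: attach an MTD device to UBI, later detach it. */
	#include <linux/mutex.h>
	#include <linux/mtd/mtd.h>
	#include "ubi.h"	/* ubi_attach_mtd_dev(), ubi_detach_mtd_dev(),
				 * ubi_devices_mutex (UBI-internal header) */

	static int attach_then_detach_example(struct mtd_info *mtd)
	{
		int ubi_num, err;

		mutex_lock(&ubi_devices_mutex);
		/* UBI_DEV_NUM_AUTO: pick the first free ubiN number;
		 * vid_hdr_offset = 0: use the default computed in io_init();
		 * max_beb_per1024 = 0: fall back to the configured default. */
		ubi_num = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0);
		mutex_unlock(&ubi_devices_mutex);
		if (ubi_num < 0)
			return ubi_num;

		/* ... ubiN and its volumes are now usable ... */

		mutex_lock(&ubi_devices_mutex);
		err = ubi_detach_mtd_dev(ubi_num, 0);	/* 0: refuse if still in use */
		mutex_unlock(&ubi_devices_mutex);
		return err;
	}
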