Lines matching refs: d  (identifier cross-reference for drivers/mtd/mtdswap.c)

175 #define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)  argument
176 #define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL) argument
177 #define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name)) argument
178 #define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count) argument
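The TREE_* helpers above index a per-device array of rb-trees by token-pasting the tree name onto an MTDSWAP_ prefix. A minimal compilable sketch of the same pattern, using simplified stand-in types (the real rb_root and tree definitions come from the kernel, so the struct names below are assumptions):

    #include <stdio.h>

    /* Stand-ins for the kernel structures (assumed, simplified). */
    struct rb_root { void *rb_node; };
    struct mtdswap_tree { struct rb_root root; unsigned int count; };
    enum { MTDSWAP_CLEAN, MTDSWAP_DIRTY, MTDSWAP_TREE_CNT };
    struct dev_sketch { struct mtdswap_tree trees[MTDSWAP_TREE_CNT]; };

    /* Same token-pasting trick as lines 175-178. */
    #define TREE_ROOT(d, name)     (&(d)->trees[MTDSWAP_ ## name].root)
    #define TREE_EMPTY(d, name)    (TREE_ROOT(d, name)->rb_node == NULL)
    #define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
    #define TREE_COUNT(d, name)    ((d)->trees[MTDSWAP_ ## name].count)

    int main(void)
    {
        struct dev_sketch d = { 0 };
        printf("CLEAN empty: %d, count: %u\n",
               TREE_EMPTY(&d, CLEAN), TREE_COUNT(&d, CLEAN));
        return 0;
    }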
197 static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
199 static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb) in mtdswap_eb_offset() argument
201 return (loff_t)(eb - d->eb_data) * d->mtd->erasesize; in mtdswap_eb_offset()
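Line 201 is plain pointer arithmetic: the element index of eb within the eb_data array, scaled by the eraseblock size, gives the block's byte offset on flash. A small userspace sketch under assumed, simplified types:

    #include <stdio.h>

    struct eb_state { int unused; };   /* stand-in for struct swap_eb */
    struct dev_state {
        struct eb_state *eb_data;      /* per-eraseblock state array */
        unsigned int erasesize;        /* bytes per eraseblock */
    };

    /* Mirrors mtdswap_eb_offset(): array index times erasesize. */
    static long long eb_offset(struct dev_state *d, struct eb_state *eb)
    {
        return (long long)(eb - d->eb_data) * d->erasesize;
    }

    int main(void)
    {
        struct eb_state ebs[4];
        struct dev_state d = { ebs, 128 * 1024 };
        printf("%#llx\n", eb_offset(&d, &ebs[2])); /* 2 * 128 KiB = 0x40000 */
        return 0;
    }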
204 static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb) in mtdswap_eb_detach() argument
211 oldidx = tp - &d->trees[0]; in mtdswap_eb_detach()
213 d->trees[oldidx].count--; in mtdswap_eb_detach()
237 static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx) in mtdswap_rb_add() argument
241 if (eb->root == &d->trees[idx].root) in mtdswap_rb_add()
244 mtdswap_eb_detach(d, eb); in mtdswap_rb_add()
245 root = &d->trees[idx].root; in mtdswap_rb_add()
248 d->trees[idx].count++; in mtdswap_rb_add()
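mtdswap_eb_detach and mtdswap_rb_add together migrate an eraseblock between state trees: detach recovers the old tree's index by pointer subtraction from the block's stored root pointer (line 211) and decrements that tree's count, then rb_add links the block under the new root and increments the new count. A sketch of the index recovery, assuming root is the tree struct's first member (the driver itself goes through container_of):

    #include <stdio.h>

    struct rb_root { void *rb_node; };
    struct mtdswap_tree { struct rb_root root; unsigned int count; };

    /* Recover the array index of the tree owning this root, as at line 211.
     * The cast is valid because root is the struct's first member. */
    static unsigned int tree_index(struct mtdswap_tree *trees,
                                   struct rb_root *root)
    {
        struct mtdswap_tree *tp = (struct mtdswap_tree *)root;
        return (unsigned int)(tp - &trees[0]);
    }

    int main(void)
    {
        struct mtdswap_tree trees[4] = { 0 };
        printf("%u\n", tree_index(trees, &trees[2].root)); /* prints 2 */
        return 0;
    }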
266 static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb) in mtdswap_handle_badblock() argument
271 d->spare_eblks--; in mtdswap_handle_badblock()
273 mtdswap_eb_detach(d, eb); in mtdswap_handle_badblock()
277 if (!mtd_can_have_bb(d->mtd)) in mtdswap_handle_badblock()
280 offset = mtdswap_eb_offset(d, eb); in mtdswap_handle_badblock()
281 dev_warn(d->dev, "Marking bad block at %08llx\n", offset); in mtdswap_handle_badblock()
282 ret = mtd_block_markbad(d->mtd, offset); in mtdswap_handle_badblock()
285 dev_warn(d->dev, "Marking block bad failed for block at %08llx " in mtdswap_handle_badblock()
294 static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb) in mtdswap_handle_write_error() argument
297 struct swap_eb *curr_write = d->curr_write; in mtdswap_handle_write_error()
301 d->curr_write = NULL; in mtdswap_handle_write_error()
303 if (!marked && d->curr_write_pos != 0) { in mtdswap_handle_write_error()
304 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); in mtdswap_handle_write_error()
309 return mtdswap_handle_badblock(d, eb); in mtdswap_handle_write_error()
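Lines 297-309 encode the write-error policy: the failing block is dropped as the current write target, and if it has not already been marked failed but holds previously written pages (curr_write_pos != 0) it is parked in the FAILING tree so its live pages can be rescued later; otherwise it goes straight to the bad-block handler. A condensed, hypothetical rendering of that decision:

    /* Hypothetical condensation of mtdswap_handle_write_error()'s choice. */
    enum action { PARK_IN_FAILING, MARK_BAD };

    static enum action write_error_policy(int already_marked_failed,
                                          unsigned int curr_write_pos)
    {
        /* A partially filled block still holds live pages, so it is only
         * demoted to FAILING; otherwise mark the block bad outright. */
        if (!already_marked_failed && curr_write_pos != 0)
            return PARK_IN_FAILING;
        return MARK_BAD;
    }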
312 static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from, in mtdswap_read_oob() argument
315 int ret = mtd_read_oob(d->mtd, from, ops); in mtdswap_read_oob()
321 dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n", in mtdswap_read_oob()
327 dev_warn(d->dev, "Read OOB returned short read (%zd bytes not " in mtdswap_read_oob()
336 static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) in mtdswap_read_markers() argument
343 offset = mtdswap_eb_offset(d, eb); in mtdswap_read_markers()
346 if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset)) in mtdswap_read_markers()
349 ops.ooblen = 2 * d->mtd->ecclayout->oobavail; in mtdswap_read_markers()
350 ops.oobbuf = d->oob_buf; in mtdswap_read_markers()
355 ret = mtdswap_read_oob(d, offset, &ops); in mtdswap_read_markers()
360 data = (struct mtdswap_oobdata *)d->oob_buf; in mtdswap_read_markers()
362 (d->oob_buf + d->mtd->ecclayout->oobavail); in mtdswap_read_markers()
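mtdswap_read_markers pulls two OOB regions in a single call (ooblen is twice oobavail at line 349) because the driver keeps one marker in the first page's OOB and a second copy one page later; line 362 derives the second view by offsetting oob_buf by oobavail. A sketch of that split with an assumed marker layout (the real struct mtdswap_oobdata may differ in detail):

    #include <stdint.h>

    /* Assumed marker layout: a magic value plus an erase counter. */
    struct oobdata_sketch {
        uint16_t magic;
        uint32_t count;
    } __attribute__((packed));

    static void split_markers(uint8_t *oob_buf, unsigned int oobavail,
                              struct oobdata_sketch **first,
                              struct oobdata_sketch **second)
    {
        *first  = (struct oobdata_sketch *)oob_buf;
        *second = (struct oobdata_sketch *)(oob_buf + oobavail); /* cf. 362 */
    }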
382 static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, in mtdswap_write_marker() argument
399 offset = mtdswap_eb_offset(d, eb); in mtdswap_write_marker()
403 offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; in mtdswap_write_marker()
406 ret = mtd_write_oob(d->mtd, offset, &ops); in mtdswap_write_marker()
409 dev_warn(d->dev, "Write OOB failed for block at %08llx " in mtdswap_write_marker()
412 mtdswap_handle_write_error(d, eb); in mtdswap_write_marker()
417 dev_warn(d->dev, "Short OOB write for block at %08llx: " in mtdswap_write_marker()
431 static void mtdswap_check_counts(struct mtdswap_dev *d) in mtdswap_check_counts() argument
439 for (i = 0; i < d->eblks; i++) { in mtdswap_check_counts()
440 eb = d->eb_data + i; in mtdswap_check_counts()
455 d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root); in mtdswap_check_counts()
457 for (i = 0; i < d->eblks; i++) { in mtdswap_check_counts()
458 eb = d->eb_data + i; in mtdswap_check_counts()
470 static void mtdswap_scan_eblks(struct mtdswap_dev *d) in mtdswap_scan_eblks() argument
476 for (i = 0; i < d->eblks; i++) { in mtdswap_scan_eblks()
477 eb = d->eb_data + i; in mtdswap_scan_eblks()
479 status = mtdswap_read_markers(d, eb); in mtdswap_scan_eblks()
502 mtdswap_check_counts(d); in mtdswap_scan_eblks()
504 for (i = 0; i < d->eblks; i++) { in mtdswap_scan_eblks()
505 eb = d->eb_data + i; in mtdswap_scan_eblks()
511 mtdswap_rb_add(d, eb, idx); in mtdswap_scan_eblks()
519 static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb) in mtdswap_store_eb() argument
522 unsigned int maxweight = d->pages_per_eblk; in mtdswap_store_eb()
524 if (eb == d->curr_write) in mtdswap_store_eb()
528 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); in mtdswap_store_eb()
530 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); in mtdswap_store_eb()
532 mtdswap_rb_add(d, eb, MTDSWAP_USED); in mtdswap_store_eb()
534 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); in mtdswap_store_eb()
536 mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG); in mtdswap_store_eb()
538 mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG); in mtdswap_store_eb()
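mtdswap_store_eb files a block into one of the six trees: error flags take priority (BITFLIP, then FAILING), and otherwise the block's live-page weight measured against maxweight (pages_per_eblk, line 522) decides between USED, DIRTY, LOWFRAG and HIFRAG. A compact classifier with the same shape; the exact cutoffs are an assumption inferred from the fragment order:

    enum tree { BITFLIP, FAILING, USED, DIRTY, LOWFRAG, HIFRAG };

    /* Hypothetical classifier mirroring mtdswap_store_eb()'s structure. */
    static enum tree classify(int has_bitflip, int has_failure,
                              unsigned int weight, unsigned int maxweight)
    {
        if (has_bitflip)
            return BITFLIP;
        if (has_failure)
            return FAILING;
        if (weight == maxweight)
            return USED;              /* every page still live */
        if (weight == 0)
            return DIRTY;             /* nothing live: cheapest to reclaim */
        if (weight > maxweight / 2)
            return LOWFRAG;           /* mostly live */
        return HIFRAG;                /* mostly stale */
    }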
548 static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb) in mtdswap_erase_block() argument
550 struct mtd_info *mtd = d->mtd; in mtdswap_erase_block()
557 if (eb->erase_count > d->max_erase_count) in mtdswap_erase_block()
558 d->max_erase_count = eb->erase_count; in mtdswap_erase_block()
566 erase.addr = mtdswap_eb_offset(d, eb); in mtdswap_erase_block()
573 dev_warn(d->dev, in mtdswap_erase_block()
580 dev_err(d->dev, "Cannot erase erase block %#llx on %s\n", in mtdswap_erase_block()
583 mtdswap_handle_badblock(d, eb); in mtdswap_erase_block()
590 dev_err(d->dev, "Interrupted erase block %#llx erasure on %s", in mtdswap_erase_block()
597 dev_warn(d->dev, in mtdswap_erase_block()
604 mtdswap_handle_badblock(d, eb); in mtdswap_erase_block()
611 static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page, in mtdswap_map_free_block() argument
615 struct swap_eb *old_eb = d->curr_write; in mtdswap_map_free_block()
619 if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) { in mtdswap_map_free_block()
621 if (TREE_EMPTY(d, CLEAN)) in mtdswap_map_free_block()
624 clean_root = TREE_ROOT(d, CLEAN); in mtdswap_map_free_block()
628 TREE_COUNT(d, CLEAN)--; in mtdswap_map_free_block()
630 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); in mtdswap_map_free_block()
636 d->curr_write_pos = 0; in mtdswap_map_free_block()
637 d->curr_write = eb; in mtdswap_map_free_block()
639 mtdswap_store_eb(d, old_eb); in mtdswap_map_free_block()
642 *block = (d->curr_write - d->eb_data) * d->pages_per_eblk + in mtdswap_map_free_block()
643 d->curr_write_pos; in mtdswap_map_free_block()
645 d->curr_write->active_count++; in mtdswap_map_free_block()
646 d->revmap[*block] = page; in mtdswap_map_free_block()
647 d->curr_write_pos++; in mtdswap_map_free_block()
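Lines 642-647 convert (current eraseblock, position within it) into a flat block number and record the reverse mapping: flat block = eraseblock index * pages_per_eblk + curr_write_pos, then revmap[block] = page. The inverse, block / pages_per_eblk, is how eb_data is indexed elsewhere in this listing. A worked sketch:

    #include <stdio.h>

    /* Mirrors the arithmetic at lines 642-643 (names simplified). */
    static unsigned int flat_block(unsigned int eb_index,
                                   unsigned int pages_per_eblk,
                                   unsigned int curr_write_pos)
    {
        return eb_index * pages_per_eblk + curr_write_pos;
    }

    int main(void)
    {
        /* Eraseblock 3, 32 pages per eraseblock, write position 5:
         * flat block 3 * 32 + 5 = 101; 101 / 32 recovers eraseblock 3. */
        printf("%u\n", flat_block(3, 32, 5));
        return 0;
    }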
652 static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d) in mtdswap_free_page_cnt() argument
654 return TREE_COUNT(d, CLEAN) * d->pages_per_eblk + in mtdswap_free_page_cnt()
655 d->pages_per_eblk - d->curr_write_pos; in mtdswap_free_page_cnt()
658 static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d) in mtdswap_enough_free_pages() argument
660 return mtdswap_free_page_cnt(d) > d->pages_per_eblk; in mtdswap_enough_free_pages()
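The capacity check at 654-660 counts every page of every CLEAN block plus the unwritten tail of the current write block, and requires strictly more than one eraseblock of headroom before a write may proceed without garbage collection. The same formulas as standalone helpers:

    /* free = clean_blocks * pages_per_eblk + (pages_per_eblk - curr_write_pos);
     * "enough" means free > pages_per_eblk: a whole spare eraseblock beyond
     * the block currently being filled. */
    static unsigned int free_page_cnt(unsigned int clean_count,
                                      unsigned int pages_per_eblk,
                                      unsigned int curr_write_pos)
    {
        return clean_count * pages_per_eblk + pages_per_eblk - curr_write_pos;
    }

    static int enough_free_pages(unsigned int clean_count,
                                 unsigned int pages_per_eblk,
                                 unsigned int curr_write_pos)
    {
        return free_page_cnt(clean_count, pages_per_eblk, curr_write_pos)
                > pages_per_eblk;
    }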
663 static int mtdswap_write_block(struct mtdswap_dev *d, char *buf, in mtdswap_write_block() argument
666 struct mtd_info *mtd = d->mtd; in mtdswap_write_block()
674 while (!mtdswap_enough_free_pages(d)) in mtdswap_write_block()
675 if (mtdswap_gc(d, 0) > 0) in mtdswap_write_block()
678 ret = mtdswap_map_free_block(d, page, bp); in mtdswap_write_block()
679 eb = d->eb_data + (*bp / d->pages_per_eblk); in mtdswap_write_block()
682 d->curr_write = NULL; in mtdswap_write_block()
684 d->revmap[*bp] = PAGE_UNDEF; in mtdswap_write_block()
694 d->curr_write_pos--; in mtdswap_write_block()
696 d->revmap[*bp] = PAGE_UNDEF; in mtdswap_write_block()
697 mtdswap_handle_write_error(d, eb); in mtdswap_write_block()
702 dev_err(d->dev, "Write to MTD device failed: %d (%zd written)", in mtdswap_write_block()
708 dev_err(d->dev, "Short write to MTD device: %zd written", in mtdswap_write_block()
717 d->curr_write_pos--; in mtdswap_write_block()
719 d->revmap[*bp] = PAGE_UNDEF; in mtdswap_write_block()
724 static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock, in mtdswap_move_block() argument
727 struct mtd_info *mtd = d->mtd; in mtdswap_move_block()
734 page = d->revmap[oldblock]; in mtdswap_move_block()
739 ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); in mtdswap_move_block()
742 oldeb = d->eb_data + oldblock / d->pages_per_eblk; in mtdswap_move_block()
745 dev_err(d->dev, "Read Error: %d (block %u)\n", ret, in mtdswap_move_block()
755 dev_err(d->dev, "Short read: %zd (block %u)\n", retlen, in mtdswap_move_block()
761 ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1); in mtdswap_move_block()
763 d->page_data[page] = BLOCK_ERROR; in mtdswap_move_block()
764 dev_err(d->dev, "Write error: %d\n", ret); in mtdswap_move_block()
768 eb = d->eb_data + *newblock / d->pages_per_eblk; in mtdswap_move_block()
769 d->page_data[page] = *newblock; in mtdswap_move_block()
770 d->revmap[oldblock] = PAGE_UNDEF; in mtdswap_move_block()
771 eb = d->eb_data + oldblock / d->pages_per_eblk; in mtdswap_move_block()
777 d->page_data[page] = BLOCK_ERROR; in mtdswap_move_block()
778 d->revmap[oldblock] = PAGE_UNDEF; in mtdswap_move_block()
782 static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb) in mtdswap_gc_eblock() argument
788 eblk_base = (eb - d->eb_data) * d->pages_per_eblk; in mtdswap_gc_eblock()
790 for (i = 0; i < d->pages_per_eblk; i++) { in mtdswap_gc_eblock()
791 if (d->spare_eblks < MIN_SPARE_EBLOCKS) in mtdswap_gc_eblock()
795 if (d->revmap[block] == PAGE_UNDEF) in mtdswap_gc_eblock()
798 ret = mtdswap_move_block(d, block, &newblock); in mtdswap_gc_eblock()
806 static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d) in __mtdswap_choose_gc_tree() argument
810 if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD) in __mtdswap_choose_gc_tree()
816 if (d->trees[idx].root.rb_node != NULL) in __mtdswap_choose_gc_tree()
850 static int mtdswap_choose_wl_tree(struct mtdswap_dev *d) in mtdswap_choose_wl_tree() argument
858 root = &d->trees[i].root; in mtdswap_choose_wl_tree()
862 wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root); in mtdswap_choose_wl_tree()
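Line 862 scores a tree for wear leveling by how far its least-erased block lags the device-wide maximum erase count: a large gap means cold blocks are pooling in that tree and should be recycled. A sketch of scanning the trees for the first one past a limit; the threshold name and value here are assumptions, not the driver's constants:

    #define WEAR_LIMIT 100  /* illustrative threshold, not the driver's */

    /* Hypothetical condensation of mtdswap_choose_wl_tree()'s scoring:
     * min_erase_cnt[i] stands in for MTDSWAP_ECNT_MIN(&trees[i].root). */
    static int choose_wl_tree(const unsigned int *min_erase_cnt, int ntrees,
                              unsigned int max_erase_count)
    {
        for (int i = 0; i < ntrees; i++) {
            unsigned int wear = max_erase_count - min_erase_cnt[i];
            if (wear >= WEAR_LIMIT)
                return i;   /* this tree holds badly lagging blocks */
        }
        return -1;          /* no tree needs wear-driven GC */
    }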
878 static int mtdswap_choose_gc_tree(struct mtdswap_dev *d, in mtdswap_choose_gc_tree() argument
883 if (TREE_NONEMPTY(d, FAILING) && in mtdswap_choose_gc_tree()
884 (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY)))) in mtdswap_choose_gc_tree()
887 idx = mtdswap_choose_wl_tree(d); in mtdswap_choose_gc_tree()
891 return __mtdswap_choose_gc_tree(d); in mtdswap_choose_gc_tree()
894 static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d, in mtdswap_pick_gc_eblk() argument
901 if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD && in mtdswap_pick_gc_eblk()
902 TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING)) in mtdswap_pick_gc_eblk()
905 idx = mtdswap_choose_gc_tree(d, background); in mtdswap_pick_gc_eblk()
909 rp = &d->trees[idx].root; in mtdswap_pick_gc_eblk()
914 d->trees[idx].count--; in mtdswap_pick_gc_eblk()
923 static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d, in mtdswap_eblk_passes() argument
926 struct mtd_info *mtd = d->mtd; in mtdswap_eblk_passes()
929 unsigned int *p1 = (unsigned int *)d->page_buf; in mtdswap_eblk_passes()
930 unsigned char *p2 = (unsigned char *)d->oob_buf; in mtdswap_eblk_passes()
938 ops.datbuf = d->page_buf; in mtdswap_eblk_passes()
939 ops.oobbuf = d->oob_buf; in mtdswap_eblk_passes()
940 base = mtdswap_eb_offset(d, eb); in mtdswap_eblk_passes()
941 mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize; in mtdswap_eblk_passes()
947 memset(d->page_buf, patt, mtd->writesize); in mtdswap_eblk_passes()
948 memset(d->oob_buf, patt, mtd->ecclayout->oobavail); in mtdswap_eblk_passes()
974 ret = mtdswap_erase_block(d, eb); in mtdswap_eblk_passes()
983 mtdswap_handle_badblock(d, eb); in mtdswap_eblk_passes()
987 static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background) in mtdswap_gc() argument
992 if (d->spare_eblks < MIN_SPARE_EBLOCKS) in mtdswap_gc()
995 eb = mtdswap_pick_gc_eblk(d, background); in mtdswap_gc()
999 ret = mtdswap_gc_eblock(d, eb); in mtdswap_gc()
1004 mtdswap_handle_badblock(d, eb); in mtdswap_gc()
1009 ret = mtdswap_erase_block(d, eb); in mtdswap_gc()
1011 (ret || !mtdswap_eblk_passes(d, eb))) in mtdswap_gc()
1015 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN); in mtdswap_gc()
1018 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); in mtdswap_gc()
1020 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); in mtdswap_gc()
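The fragments at 987-1020 show the full reclaim cycle: bail out when spare blocks run low, pick a victim via the tree-choice helpers, migrate its live pages with mtdswap_gc_eblock, erase it, and re-mark it CLEAN, falling back to DIRTY if the marker write fails. The outline below reuses the driver's function names as externs but is a hypothetical condensation with error paths simplified:

    /* Hypothetical outline of one mtdswap_gc() pass; not the driver's code. */
    struct mtdswap_dev;
    struct swap_eb;

    #define MIN_SPARE_EBLOCKS 2     /* illustrative; the driver defines it */
    #define MTDSWAP_TYPE_CLEAN 0    /* illustrative marker value */
    enum { MTDSWAP_CLEAN, MTDSWAP_DIRTY };

    extern struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
                                                unsigned int background);
    extern int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb);
    extern int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb);
    extern int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
                                    int type);
    extern int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb);
    extern void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx);

    static int gc_once(struct mtdswap_dev *d, unsigned int spare_eblks,
                       unsigned int background)
    {
        struct swap_eb *eb;

        if (spare_eblks < MIN_SPARE_EBLOCKS)
            return 1;                             /* spares exhausted: stop */

        eb = mtdswap_pick_gc_eblk(d, background);
        if (!eb)
            return 1;                             /* nothing worth collecting */

        if (mtdswap_gc_eblock(d, eb)) {           /* migrate live pages first */
            mtdswap_handle_badblock(d, eb);
            return 0;
        }
        if (mtdswap_erase_block(d, eb))
            return 0;                             /* erase path marks bad itself */

        if (mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN) == 0)
            mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); /* back into the clean pool */
        else
            mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
        return 0;
    }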
1027 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); in mtdswap_background() local
1031 ret = mtdswap_gc(d, 1); in mtdswap_background()
1037 static void mtdswap_cleanup(struct mtdswap_dev *d) in mtdswap_cleanup() argument
1039 vfree(d->eb_data); in mtdswap_cleanup()
1040 vfree(d->revmap); in mtdswap_cleanup()
1041 vfree(d->page_data); in mtdswap_cleanup()
1042 kfree(d->oob_buf); in mtdswap_cleanup()
1043 kfree(d->page_buf); in mtdswap_cleanup()
1048 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); in mtdswap_flush() local
1050 mtd_sync(d->mtd); in mtdswap_flush()
1072 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); in mtdswap_writesect() local
1077 d->sect_write_count++; in mtdswap_writesect()
1079 if (d->spare_eblks < MIN_SPARE_EBLOCKS) in mtdswap_writesect()
1090 mapped = d->page_data[page]; in mtdswap_writesect()
1092 eb = d->eb_data + (mapped / d->pages_per_eblk); in mtdswap_writesect()
1094 mtdswap_store_eb(d, eb); in mtdswap_writesect()
1095 d->page_data[page] = BLOCK_UNDEF; in mtdswap_writesect()
1096 d->revmap[mapped] = PAGE_UNDEF; in mtdswap_writesect()
1099 ret = mtdswap_write_block(d, buf, page, &newblock, 0); in mtdswap_writesect()
1100 d->mtd_write_count++; in mtdswap_writesect()
1105 eb = d->eb_data + (newblock / d->pages_per_eblk); in mtdswap_writesect()
1106 d->page_data[page] = newblock; in mtdswap_writesect()
1112 static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf) in mtdswap_auto_header() argument
1119 hd->info.last_page = d->mbd_dev->size - 1; in mtdswap_auto_header()
1130 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); in mtdswap_readsect() local
1131 struct mtd_info *mtd = d->mtd; in mtdswap_readsect()
1138 d->sect_read_count++; in mtdswap_readsect()
1142 return mtdswap_auto_header(d, buf); in mtdswap_readsect()
1147 realblock = d->page_data[page]; in mtdswap_readsect()
1156 eb = d->eb_data + (realblock / d->pages_per_eblk); in mtdswap_readsect()
1157 BUG_ON(d->revmap[realblock] == PAGE_UNDEF); in mtdswap_readsect()
1165 d->mtd_read_count++; in mtdswap_readsect()
1168 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); in mtdswap_readsect()
1173 dev_err(d->dev, "Read error %d\n", ret); in mtdswap_readsect()
1175 mtdswap_rb_add(d, eb, MTDSWAP_FAILING); in mtdswap_readsect()
1184 dev_err(d->dev, "Short read %zd\n", retlen); in mtdswap_readsect()
1194 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); in mtdswap_discard() local
1199 d->discard_count++; in mtdswap_discard()
1202 mapped = d->page_data[page]; in mtdswap_discard()
1204 eb = d->eb_data + (mapped / d->pages_per_eblk); in mtdswap_discard()
1206 mtdswap_store_eb(d, eb); in mtdswap_discard()
1207 d->page_data[page] = BLOCK_UNDEF; in mtdswap_discard()
1208 d->revmap[mapped] = PAGE_UNDEF; in mtdswap_discard()
1209 d->discard_page_count++; in mtdswap_discard()
1211 d->page_data[page] = BLOCK_UNDEF; in mtdswap_discard()
1212 d->discard_page_count++; in mtdswap_discard()
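Discard (1199-1212) only severs the mapping: when the page is backed by a real block, the owning eraseblock is refiled (its live weight has dropped) and both directions of the map are reset to their undefined sentinels. A sketch of the unmap bookkeeping; the sentinel values are assumptions standing in for the driver's BLOCK_UNDEF and PAGE_UNDEF:

    #include <stdint.h>

    #define BLOCK_UNDEF 0xffffffffu   /* assumed sentinel values */
    #define PAGE_UNDEF  0xffffffffu

    /* Hypothetical helper mirroring the discard path's two-way unmap. */
    static void unmap_page(uint32_t *page_data, uint32_t *revmap,
                           unsigned int page)
    {
        uint32_t mapped = page_data[page];

        if (mapped != BLOCK_UNDEF)
            revmap[mapped] = PAGE_UNDEF;  /* block no longer backs a page */
        page_data[page] = BLOCK_UNDEF;    /* page no longer mapped */
    }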
1221 struct mtdswap_dev *d = (struct mtdswap_dev *) s->private; in mtdswap_show() local
1231 mutex_lock(&d->mbd_dev->lock); in mtdswap_show()
1234 struct rb_root *root = &d->trees[i].root; in mtdswap_show()
1237 count[i] = d->trees[i].count; in mtdswap_show()
1246 if (d->curr_write) { in mtdswap_show()
1248 cwp = d->curr_write_pos; in mtdswap_show()
1249 cwecount = d->curr_write->erase_count; in mtdswap_show()
1253 for (i = 0; i < d->eblks; i++) in mtdswap_show()
1254 sum += d->eb_data[i].erase_count; in mtdswap_show()
1256 use_size = (uint64_t)d->eblks * d->mtd->erasesize; in mtdswap_show()
1257 bb_cnt = mtdswap_badblocks(d->mtd, use_size); in mtdswap_show()
1260 pages = d->mbd_dev->size; in mtdswap_show()
1262 if (d->page_data[i] != BLOCK_UNDEF) in mtdswap_show()
1265 mutex_unlock(&d->mbd_dev->lock); in mtdswap_show()
1286 cwp, d->pages_per_eblk - cwp, cwecount); in mtdswap_show()
1292 seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count); in mtdswap_show()
1293 seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count); in mtdswap_show()
1294 seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count); in mtdswap_show()
1295 seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count); in mtdswap_show()
1296 seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count); in mtdswap_show()
1297 seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count); in mtdswap_show()
1318 static int mtdswap_add_debugfs(struct mtdswap_dev *d) in mtdswap_add_debugfs() argument
1320 struct gendisk *gd = d->mbd_dev->disk; in mtdswap_add_debugfs()
1335 d->debugfs_root = root; in mtdswap_add_debugfs()
1337 dent = debugfs_create_file("stats", S_IRUSR, root, d, in mtdswap_add_debugfs()
1340 dev_err(d->dev, "debugfs_create_file failed\n"); in mtdswap_add_debugfs()
1342 d->debugfs_root = NULL; in mtdswap_add_debugfs()
1349 static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks, in mtdswap_init() argument
1352 struct mtd_info *mtd = d->mbd_dev->mtd; in mtdswap_init()
1356 d->mtd = mtd; in mtdswap_init()
1357 d->eblks = eblocks; in mtdswap_init()
1358 d->spare_eblks = spare_cnt; in mtdswap_init()
1359 d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT; in mtdswap_init()
1361 pages = d->mbd_dev->size; in mtdswap_init()
1362 blocks = eblocks * d->pages_per_eblk; in mtdswap_init()
1365 d->trees[i].root = RB_ROOT; in mtdswap_init()
1367 d->page_data = vmalloc(sizeof(int)*pages); in mtdswap_init()
1368 if (!d->page_data) in mtdswap_init()
1371 d->revmap = vmalloc(sizeof(int)*blocks); in mtdswap_init()
1372 if (!d->revmap) in mtdswap_init()
1375 eblk_bytes = sizeof(struct swap_eb)*d->eblks; in mtdswap_init()
1376 d->eb_data = vzalloc(eblk_bytes); in mtdswap_init()
1377 if (!d->eb_data) in mtdswap_init()
1381 d->page_data[i] = BLOCK_UNDEF; in mtdswap_init()
1384 d->revmap[i] = PAGE_UNDEF; in mtdswap_init()
1386 d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); in mtdswap_init()
1387 if (!d->page_buf) in mtdswap_init()
1390 d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL); in mtdswap_init()
1391 if (!d->oob_buf) in mtdswap_init()
1394 mtdswap_scan_eblks(d); in mtdswap_init()
1399 kfree(d->page_buf); in mtdswap_init()
1401 vfree(d->eb_data); in mtdswap_init()
1403 vfree(d->revmap); in mtdswap_init()
1405 vfree(d->page_data); in mtdswap_init()
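The init fragments (1356-1391) size every table from two quantities: pages, the logical swap pages from mbd_dev->size, and blocks, the flat block space eblocks * pages_per_eblk. page_data gets one int per page, revmap one int per block, eb_data one struct swap_eb per eraseblock, and oob_buf twice oobavail so both markers fit. A worked sizing example with assumed device geometry:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed geometry; real values come from the MTD device. */
        unsigned int erasesize = 128 * 1024;
        unsigned int eblocks = 100;
        unsigned int pages_per_eblk = erasesize / 4096;  /* 32, cf. line 1359 */
        unsigned int blocks = eblocks * pages_per_eblk;  /* 3200 flat blocks */
        unsigned int pages = 3000;                       /* assumed mbd_dev->size */

        printf("page_data: %zu bytes, revmap: %zu bytes\n",
               sizeof(int) * (size_t)pages, sizeof(int) * (size_t)blocks);
        return 0;
    }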
1413 struct mtdswap_dev *d; in mtdswap_add_mtd() local
1503 d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL); in mtdswap_add_mtd()
1504 if (!d) in mtdswap_add_mtd()
1509 kfree(d); in mtdswap_add_mtd()
1513 d->mbd_dev = mbd_dev; in mtdswap_add_mtd()
1514 mbd_dev->priv = d; in mtdswap_add_mtd()
1524 if (mtdswap_init(d, eblocks, spare_cnt) < 0) in mtdswap_add_mtd()
1530 d->dev = disk_to_dev(mbd_dev->disk); in mtdswap_add_mtd()
1532 ret = mtdswap_add_debugfs(d); in mtdswap_add_mtd()
1542 mtdswap_cleanup(d); in mtdswap_add_mtd()
1546 kfree(d); in mtdswap_add_mtd()
1551 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); in mtdswap_remove_dev() local
1553 debugfs_remove_recursive(d->debugfs_root); in mtdswap_remove_dev()
1555 mtdswap_cleanup(d); in mtdswap_remove_dev()
1556 kfree(d); in mtdswap_remove_dev()