Lines Matching refs:msb — cross-reference of the msb_data driver state used throughout the MemoryStick block driver (the "ms_block" / msblk code quoted below)

151 static int msb_validate_used_block_bitmap(struct msb_data *msb)  in msb_validate_used_block_bitmap()  argument
159 for (i = 0; i < msb->zone_count; i++) in msb_validate_used_block_bitmap()
160 total_free_blocks += msb->free_block_count[i]; in msb_validate_used_block_bitmap()
162 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap, in msb_validate_used_block_bitmap()
163 msb->block_count) == total_free_blocks) in msb_validate_used_block_bitmap()
167 msb->read_only = true; in msb_validate_used_block_bitmap()
172 static void msb_mark_block_used(struct msb_data *msb, int pba) in msb_mark_block_used() argument
176 if (test_bit(pba, msb->used_blocks_bitmap)) { in msb_mark_block_used()
179 msb->read_only = true; in msb_mark_block_used()
183 if (msb_validate_used_block_bitmap(msb)) in msb_mark_block_used()
187 __set_bit(pba, msb->used_blocks_bitmap); in msb_mark_block_used()
188 msb->free_block_count[zone]--; in msb_mark_block_used()
192 static void msb_mark_block_unused(struct msb_data *msb, int pba) in msb_mark_block_unused() argument
196 if (!test_bit(pba, msb->used_blocks_bitmap)) { in msb_mark_block_unused()
198 msb->read_only = true; in msb_mark_block_unused()
202 if (msb_validate_used_block_bitmap(msb)) in msb_mark_block_unused()
206 __clear_bit(pba, msb->used_blocks_bitmap); in msb_mark_block_unused()
207 msb->free_block_count[zone]++; in msb_mark_block_unused()
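
These helpers keep the used-blocks bitmap and the per-zone free counters in lock-step, and drop the card to read-only as soon as they disagree (double allocation, double free, or a failed cross-check in msb_validate_used_block_bitmap()). A minimal userspace sketch of the same accounting; the struct, sizes and helper names are illustrative stand-ins, not the driver's:

#include <stdbool.h>

#define ZONES        2
#define ZONE_BLOCKS  512                       /* assumed blocks per zone */
#define BLOCK_COUNT  (ZONES * ZONE_BLOCKS)
#define WORD_BITS    (8 * (int)sizeof(unsigned long))

struct ftl_state {
    unsigned long used[BLOCK_COUNT / WORD_BITS];   /* one bit per block */
    int free_blocks[ZONES];
    bool read_only;
};

static bool block_is_used(const struct ftl_state *s, int pba)
{
    return s->used[pba / WORD_BITS] & (1UL << (pba % WORD_BITS));
}

/* Cross-check the counters against the bitmap; on mismatch go read-only. */
static int validate_used_block_bitmap(struct ftl_state *s)
{
    int i, used = 0, total_free = 0;

    for (i = 0; i < BLOCK_COUNT; i++)
        used += block_is_used(s, i);
    for (i = 0; i < ZONES; i++)
        total_free += s->free_blocks[i];

    if (BLOCK_COUNT - used == total_free)
        return 0;
    s->read_only = true;
    return -1;
}

static void mark_block_used(struct ftl_state *s, int pba)
{
    if (block_is_used(s, pba)) {           /* double allocation: corrupt */
        s->read_only = true;
        return;
    }
    s->used[pba / WORD_BITS] |= 1UL << (pba % WORD_BITS);
    s->free_blocks[pba / ZONE_BLOCKS]--;
}

static void mark_block_unused(struct ftl_state *s, int pba)
{
    if (!block_is_used(s, pba)) {          /* double free: corrupt */
        s->read_only = true;
        return;
    }
    s->used[pba / WORD_BITS] &= ~(1UL << (pba % WORD_BITS));
    s->free_blocks[pba / ZONE_BLOCKS]++;
}

The point is only the invariant: every mark_block_used()/mark_block_unused() call keeps BLOCK_COUNT minus the number of set bits equal to the summed free counters, which is exactly what the validation routine checks.
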
211 static void msb_invalidate_reg_window(struct msb_data *msb) in msb_invalidate_reg_window() argument
213 msb->reg_addr.w_offset = offsetof(struct ms_register, id); in msb_invalidate_reg_window()
214 msb->reg_addr.w_length = sizeof(struct ms_id_register); in msb_invalidate_reg_window()
215 msb->reg_addr.r_offset = offsetof(struct ms_register, id); in msb_invalidate_reg_window()
216 msb->reg_addr.r_length = sizeof(struct ms_id_register); in msb_invalidate_reg_window()
217 msb->addr_valid = false; in msb_invalidate_reg_window()
221 static int msb_run_state_machine(struct msb_data *msb, int (*state_func) in msb_run_state_machine() argument
224 struct memstick_dev *card = msb->card; in msb_run_state_machine()
226 WARN_ON(msb->state != -1); in msb_run_state_machine()
227 msb->int_polling = false; in msb_run_state_machine()
228 msb->state = 0; in msb_run_state_machine()
229 msb->exit_error = 0; in msb_run_state_machine()
237 WARN_ON(msb->state != -1); in msb_run_state_machine()
238 return msb->exit_error; in msb_run_state_machine()
242 static int msb_exit_state_machine(struct msb_data *msb, int error) in msb_exit_state_machine() argument
244 WARN_ON(msb->state == -1); in msb_exit_state_machine()
246 msb->state = -1; in msb_exit_state_machine()
247 msb->exit_error = error; in msb_exit_state_machine()
248 msb->card->next_request = h_msb_default_bad; in msb_exit_state_machine()
252 msb_invalidate_reg_window(msb); in msb_exit_state_machine()
254 complete(&msb->card->mrq_complete); in msb_exit_state_machine()
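
msb_run_state_machine() and msb_exit_state_machine() bracket every card operation: run resets int_polling, sets state to 0 and clears exit_error before the installed handler advances through its states; the exit helper records the final error, points next_request at h_msb_default_bad, invalidates the register window and completes mrq_complete for whoever is waiting, with state going back to -1 (the WARN_ONs guard exactly that idle/busy convention). A compressed, single-threaded sketch of the contract, with the asynchronous request/completion machinery replaced by a plain loop (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct machine {
    int state;        /* -1 = idle, >= 0 = step inside a handler */
    int exit_error;
    bool done;
};

/* Handler: advances m->state and eventually calls exit_state_machine(). */
typedef int (*state_func)(struct machine *m);

static int exit_state_machine(struct machine *m, int error)
{
    m->state = -1;        /* back to idle */
    m->exit_error = error;
    m->done = true;       /* stands in for completing the waiter */
    return 0;
}

static int run_state_machine(struct machine *m, state_func step)
{
    m->state = 0;
    m->exit_error = 0;
    m->done = false;

    /* The driver queues requests and sleeps on a completion instead;
     * here the handler is simply called until it signals exit. */
    while (!m->done)
        step(m);

    return m->exit_error;
}

/* Trivial two-step handler as a usage example. */
static int two_step_handler(struct machine *m)
{
    switch (m->state) {
    case 0:
        m->state = 1;
        return 0;
    case 1:
        return exit_state_machine(m, 0);
    default:
        return exit_state_machine(m, -1);
    }
}

int main(void)
{
    struct machine m = { .state = -1 };
    printf("result: %d\n", run_state_machine(&m, two_step_handler));
    return 0;
}
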
259 static int msb_read_int_reg(struct msb_data *msb, long timeout) in msb_read_int_reg() argument
261 struct memstick_request *mrq = &msb->card->current_mrq; in msb_read_int_reg()
263 WARN_ON(msb->state == -1); in msb_read_int_reg()
265 if (!msb->int_polling) { in msb_read_int_reg()
266 msb->int_timeout = jiffies + in msb_read_int_reg()
268 msb->int_polling = true; in msb_read_int_reg()
269 } else if (time_after(jiffies, msb->int_timeout)) { in msb_read_int_reg()
274 if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) && in msb_read_int_reg()
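
msb_read_int_reg() arms a jiffies deadline the first time it is called in polling mode and bails out once time_after() says the deadline has passed; hosts advertising MEMSTICK_CAP_AUTO_GET_INT can skip the explicit INT read. A userspace sketch of that deadline bookkeeping using CLOCK_MONOTONIC instead of jiffies (function and field names are made up for the example):

#define _POSIX_C_SOURCE 200809L   /* for clock_gettime() */
#include <stdbool.h>
#include <time.h>

struct poller {
    bool polling;
    struct timespec deadline;
};

static struct timespec now_monotonic(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts;
}

static bool after(struct timespec a, struct timespec b)
{
    return a.tv_sec > b.tv_sec ||
           (a.tv_sec == b.tv_sec && a.tv_nsec > b.tv_nsec);
}

/* Returns 0 to keep polling, -1 once the timeout has expired. */
static int poll_int_reg(struct poller *p, long timeout_ms)
{
    if (!p->polling) {
        p->deadline = now_monotonic();
        p->deadline.tv_sec  += timeout_ms / 1000;
        p->deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (p->deadline.tv_nsec >= 1000000000L) {
            p->deadline.tv_sec++;
            p->deadline.tv_nsec -= 1000000000L;
        }
        p->polling = true;
    } else if (after(now_monotonic(), p->deadline)) {
        return -1;    /* corresponds to exiting the state machine with a timeout */
    }

    /* ...issue the next INT-register read (or rely on auto-INT) here... */
    return 0;
}
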
286 static int msb_read_regs(struct msb_data *msb, int offset, int len) in msb_read_regs() argument
288 struct memstick_request *req = &msb->card->current_mrq; in msb_read_regs()
290 if (msb->reg_addr.r_offset != offset || in msb_read_regs()
291 msb->reg_addr.r_length != len || !msb->addr_valid) { in msb_read_regs()
293 msb->reg_addr.r_offset = offset; in msb_read_regs()
294 msb->reg_addr.r_length = len; in msb_read_regs()
295 msb->addr_valid = true; in msb_read_regs()
298 &msb->reg_addr, sizeof(msb->reg_addr)); in msb_read_regs()
307 static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf) in msb_write_regs() argument
309 struct memstick_request *req = &msb->card->current_mrq; in msb_write_regs()
311 if (msb->reg_addr.w_offset != offset || in msb_write_regs()
312 msb->reg_addr.w_length != len || !msb->addr_valid) { in msb_write_regs()
314 msb->reg_addr.w_offset = offset; in msb_write_regs()
315 msb->reg_addr.w_length = len; in msb_write_regs()
316 msb->addr_valid = true; in msb_write_regs()
319 &msb->reg_addr, sizeof(msb->reg_addr)); in msb_write_regs()
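
msb_read_regs() and msb_write_regs() cache the currently programmed register window in msb->reg_addr: the address-setup request is re-issued only when the requested offset/length differ from the cached window or addr_valid has been cleared by msb_invalidate_reg_window(). A sketch of that caching idea (struct and helper names are illustrative):

#include <stdbool.h>

struct reg_window {
    int r_offset, r_length;
    int w_offset, w_length;
    bool valid;
};

/* Returns true when the address window had to be (re)programmed,
 * i.e. when an extra address-setup transaction is needed. */
static bool select_read_window(struct reg_window *win, int offset, int len)
{
    if (win->valid && win->r_offset == offset && win->r_length == len)
        return false;        /* window already matches: skip the setup */

    win->r_offset = offset;
    win->r_length = len;
    win->valid = true;
    /* ...send the new window to the card here... */
    return true;
}

static void invalidate_window(struct reg_window *win)
{
    win->valid = false;      /* next access reprograms the window */
}
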
342 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_read_page() local
349 return msb_exit_state_machine(msb, mrq->error); in h_msb_read_page()
352 switch (msb->state) { in h_msb_read_page()
357 if (!msb_write_regs(msb, in h_msb_read_page()
360 (unsigned char *)&msb->regs.param)) in h_msb_read_page()
363 msb->state = MSB_RP_SEND_READ_COMMAND; in h_msb_read_page()
369 msb->state = MSB_RP_SEND_INT_REQ; in h_msb_read_page()
373 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT; in h_msb_read_page()
376 if (msb_read_int_reg(msb, -1)) in h_msb_read_page()
382 msb->regs.status.interrupt = intreg; in h_msb_read_page()
385 return msb_exit_state_machine(msb, -EIO); in h_msb_read_page()
388 msb->state = MSB_RP_SEND_INT_REQ; in h_msb_read_page()
392 msb->int_polling = false; in h_msb_read_page()
393 msb->state = (intreg & MEMSTICK_INT_ERR) ? in h_msb_read_page()
399 if (!msb_read_regs(msb, in h_msb_read_page()
404 msb->state = MSB_RP_RECEIVE_STATUS_REG; in h_msb_read_page()
408 msb->regs.status = *(struct ms_status_register *)mrq->data; in h_msb_read_page()
409 msb->state = MSB_RP_SEND_OOB_READ; in h_msb_read_page()
413 if (!msb_read_regs(msb, in h_msb_read_page()
418 msb->state = MSB_RP_RECEIVE_OOB_READ; in h_msb_read_page()
422 msb->regs.extra_data = in h_msb_read_page()
424 msb->state = MSB_RP_SEND_READ_DATA; in h_msb_read_page()
429 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) { in h_msb_read_page()
430 msb->state = MSB_RP_RECEIVE_READ_DATA; in h_msb_read_page()
435 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), in h_msb_read_page()
436 msb->current_sg_offset, in h_msb_read_page()
437 msb->page_size); in h_msb_read_page()
440 msb->state = MSB_RP_RECEIVE_READ_DATA; in h_msb_read_page()
444 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) { in h_msb_read_page()
445 msb->current_sg_offset += msb->page_size; in h_msb_read_page()
446 return msb_exit_state_machine(msb, 0); in h_msb_read_page()
449 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) { in h_msb_read_page()
451 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_read_page()
454 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) { in h_msb_read_page()
456 msb->current_sg_offset += msb->page_size; in h_msb_read_page()
457 return msb_exit_state_machine(msb, -EUCLEAN); in h_msb_read_page()
460 return msb_exit_state_machine(msb, -EIO); in h_msb_read_page()
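
The tail of h_msb_read_page() classifies the finished page: no MEMSTICK_INT_ERR means a clean read, MEMSTICK_UNCORR_ERROR becomes -EBADMSG, MEMSTICK_CORR_ERROR still delivers the data but reports -EUCLEAN so the caller can react, and anything else is -EIO. The same classification as a stand-alone helper; the bit masks below are illustrative constants, not the MemoryStick register layout, and the errno values are the Linux ones used in the listing:

#include <errno.h>

/* Illustrative status bits; the real layout lives in the MemoryStick
 * register definitions. */
#define ST_INT_ERR     0x01
#define ST_UNCORR_ERR  0x02
#define ST_CORR_ERR    0x04

/* Translate the status of a finished page read into a return code. */
static int classify_read_status(unsigned int int_reg, unsigned int status1)
{
    if (!(int_reg & ST_INT_ERR))
        return 0;               /* clean read */
    if (status1 & ST_UNCORR_ERR)
        return -EBADMSG;        /* data is lost */
    if (status1 & ST_CORR_ERR)
        return -EUCLEAN;        /* data delivered, but the block is degrading */
    return -EIO;                /* unexpected error */
}
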
478 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_write_block() local
484 return msb_exit_state_machine(msb, mrq->error); in h_msb_write_block()
487 switch (msb->state) { in h_msb_write_block()
496 if (!msb_write_regs(msb, in h_msb_write_block()
499 &msb->regs.param)) in h_msb_write_block()
502 msb->state = MSB_WB_SEND_WRITE_OOB; in h_msb_write_block()
506 if (!msb_write_regs(msb, in h_msb_write_block()
509 &msb->regs.extra_data)) in h_msb_write_block()
511 msb->state = MSB_WB_SEND_WRITE_COMMAND; in h_msb_write_block()
518 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
522 msb->state = MSB_WB_RECEIVE_INT_REQ; in h_msb_write_block()
523 if (msb_read_int_reg(msb, -1)) in h_msb_write_block()
529 msb->regs.status.interrupt = intreg; in h_msb_write_block()
533 return msb_exit_state_machine(msb, -EIO); in h_msb_write_block()
536 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_write_block()
540 if (msb->current_page == msb->pages_in_block) { in h_msb_write_block()
542 return msb_exit_state_machine(msb, 0); in h_msb_write_block()
543 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
550 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
554 msb->int_polling = false; in h_msb_write_block()
555 msb->state = MSB_WB_SEND_WRITE_DATA; in h_msb_write_block()
561 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), in h_msb_write_block()
562 msb->current_sg_offset, in h_msb_write_block()
563 msb->page_size) < msb->page_size) in h_msb_write_block()
564 return msb_exit_state_machine(msb, -EIO); in h_msb_write_block()
568 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION; in h_msb_write_block()
572 msb->current_page++; in h_msb_write_block()
573 msb->current_sg_offset += msb->page_size; in h_msb_write_block()
574 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
590 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_send_command() local
596 return msb_exit_state_machine(msb, mrq->error); in h_msb_send_command()
599 switch (msb->state) { in h_msb_send_command()
603 if (!msb_write_regs(msb, in h_msb_send_command()
606 &msb->regs.param)) in h_msb_send_command()
608 msb->state = MSB_SC_SEND_WRITE_OOB; in h_msb_send_command()
612 if (!msb->command_need_oob) { in h_msb_send_command()
613 msb->state = MSB_SC_SEND_COMMAND; in h_msb_send_command()
617 if (!msb_write_regs(msb, in h_msb_send_command()
620 &msb->regs.extra_data)) in h_msb_send_command()
623 msb->state = MSB_SC_SEND_COMMAND; in h_msb_send_command()
627 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1); in h_msb_send_command()
628 msb->state = MSB_SC_SEND_INT_REQ; in h_msb_send_command()
632 msb->state = MSB_SC_RECEIVE_INT_REQ; in h_msb_send_command()
633 if (msb_read_int_reg(msb, -1)) in h_msb_send_command()
641 return msb_exit_state_machine(msb, -EIO); in h_msb_send_command()
643 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_send_command()
646 msb->state = MSB_SC_SEND_INT_REQ; in h_msb_send_command()
650 return msb_exit_state_machine(msb, 0); in h_msb_send_command()
661 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_reset() local
665 return msb_exit_state_machine(msb, mrq->error); in h_msb_reset()
667 switch (msb->state) { in h_msb_reset()
671 msb->state = MSB_RS_CONFIRM; in h_msb_reset()
674 return msb_exit_state_machine(msb, 0); in h_msb_reset()
683 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_parallel_switch() local
689 msb->regs.param.system &= ~MEMSTICK_SYS_PAM; in h_msb_parallel_switch()
690 return msb_exit_state_machine(msb, mrq->error); in h_msb_parallel_switch()
693 switch (msb->state) { in h_msb_parallel_switch()
696 msb->regs.param.system |= MEMSTICK_SYS_PAM; in h_msb_parallel_switch()
698 if (!msb_write_regs(msb, in h_msb_parallel_switch()
701 (unsigned char *)&msb->regs.param)) in h_msb_parallel_switch()
704 msb->state = MSB_PS_SWICH_HOST; in h_msb_parallel_switch()
712 msb->state = MSB_PS_CONFIRM; in h_msb_parallel_switch()
716 return msb_exit_state_machine(msb, 0); in h_msb_parallel_switch()
722 static int msb_switch_to_parallel(struct msb_data *msb);
725 static int msb_reset(struct msb_data *msb, bool full) in msb_reset() argument
728 bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM; in msb_reset()
729 struct memstick_dev *card = msb->card; in msb_reset()
734 msb->regs.param.system = MEMSTICK_SYS_BAMD; in msb_reset()
742 msb_invalidate_reg_window(msb); in msb_reset()
754 msb->read_only = true; in msb_reset()
759 error = msb_run_state_machine(msb, h_msb_reset); in msb_reset()
762 msb->read_only = true; in msb_reset()
768 msb_switch_to_parallel(msb); in msb_reset()
773 static int msb_switch_to_parallel(struct msb_data *msb) in msb_switch_to_parallel() argument
777 error = msb_run_state_machine(msb, h_msb_parallel_switch); in msb_switch_to_parallel()
780 msb->regs.param.system &= ~MEMSTICK_SYS_PAM; in msb_switch_to_parallel()
781 msb_reset(msb, true); in msb_switch_to_parallel()
785 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT; in msb_switch_to_parallel()
790 static int msb_set_overwrite_flag(struct msb_data *msb, in msb_set_overwrite_flag() argument
793 if (msb->read_only) in msb_set_overwrite_flag()
796 msb->regs.param.block_address = cpu_to_be16(pba); in msb_set_overwrite_flag()
797 msb->regs.param.page_address = page; in msb_set_overwrite_flag()
798 msb->regs.param.cp = MEMSTICK_CP_OVERWRITE; in msb_set_overwrite_flag()
799 msb->regs.extra_data.overwrite_flag = flag; in msb_set_overwrite_flag()
800 msb->command_value = MS_CMD_BLOCK_WRITE; in msb_set_overwrite_flag()
801 msb->command_need_oob = true; in msb_set_overwrite_flag()
805 return msb_run_state_machine(msb, h_msb_send_command); in msb_set_overwrite_flag()
808 static int msb_mark_bad(struct msb_data *msb, int pba) in msb_mark_bad() argument
811 msb_reset(msb, true); in msb_mark_bad()
813 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST); in msb_mark_bad()
816 static int msb_mark_page_bad(struct msb_data *msb, int pba, int page) in msb_mark_page_bad() argument
819 msb_reset(msb, true); in msb_mark_page_bad()
820 return msb_set_overwrite_flag(msb, in msb_mark_page_bad()
825 static int msb_erase_block(struct msb_data *msb, u16 pba) in msb_erase_block() argument
828 if (msb->read_only) in msb_erase_block()
834 msb->regs.param.block_address = cpu_to_be16(pba); in msb_erase_block()
835 msb->regs.param.page_address = 0; in msb_erase_block()
836 msb->regs.param.cp = MEMSTICK_CP_BLOCK; in msb_erase_block()
837 msb->command_value = MS_CMD_BLOCK_ERASE; in msb_erase_block()
838 msb->command_need_oob = false; in msb_erase_block()
841 error = msb_run_state_machine(msb, h_msb_send_command); in msb_erase_block()
842 if (!error || msb_reset(msb, true)) in msb_erase_block()
848 msb_mark_bad(msb, pba); in msb_erase_block()
852 msb_mark_block_unused(msb, pba); in msb_erase_block()
853 __set_bit(pba, msb->erased_blocks_bitmap); in msb_erase_block()
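
msb_erase_block() refuses to run on read-only media, issues MS_CMD_BLOCK_ERASE through the command state machine, and the `if (!error || msb_reset(msb, true))` line suggests the attempt sits in a retry loop that resets the card between tries; a block that never erases is marked bad, while a successful erase is returned to the free pool and remembered in erased_blocks_bitmap (later used to skip the post-write verify). The retry shape, sketched with stand-in helpers rather than the driver's functions:

#include <stdbool.h>

#define ERASE_RETRIES 3          /* illustrative retry count */

/* Stand-ins for the real card operations. */
static int  card_erase(int pba)   { (void)pba; return 0; }
static int  card_reset(void)      { return 0; }
static void mark_bad(int pba)     { (void)pba; }
static void mark_unused(int pba)  { (void)pba; }
static void mark_erased(int pba)  { (void)pba; }

static int erase_block(int pba)
{
    int try, error = -1;

    for (try = 0; try < ERASE_RETRIES; try++) {
        error = card_erase(pba);
        if (!error)
            break;
        if (card_reset())        /* card unusable: stop retrying */
            break;
    }

    if (error) {
        mark_bad(pba);           /* never hand this block out again */
        return error;
    }

    mark_unused(pba);            /* back to the free pool */
    mark_erased(pba);            /* remembered so a later write can skip verify */
    return 0;
}
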
858 static int msb_read_page(struct msb_data *msb, in msb_read_page() argument
867 size_t len = msb->page_size; in msb_read_page()
901 if (pba >= msb->block_count) { in msb_read_page()
907 msb->regs.param.block_address = cpu_to_be16(pba); in msb_read_page()
908 msb->regs.param.page_address = page; in msb_read_page()
909 msb->regs.param.cp = MEMSTICK_CP_PAGE; in msb_read_page()
911 msb->current_sg = sg; in msb_read_page()
912 msb->current_sg_offset = offset; in msb_read_page()
913 error = msb_run_state_machine(msb, h_msb_read_page); in msb_read_page()
923 *extra = msb->regs.extra_data; in msb_read_page()
925 if (!error || msb_reset(msb, true)) in msb_read_page()
935 if (msb->regs.extra_data.overwrite_flag & in msb_read_page()
937 msb_mark_page_bad(msb, pba, page); in msb_read_page()
948 static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page, in msb_read_oob() argument
954 msb->regs.param.block_address = cpu_to_be16(pba); in msb_read_oob()
955 msb->regs.param.page_address = page; in msb_read_oob()
956 msb->regs.param.cp = MEMSTICK_CP_EXTRA; in msb_read_oob()
958 if (pba > msb->block_count) { in msb_read_oob()
963 error = msb_run_state_machine(msb, h_msb_read_page); in msb_read_oob()
964 *extra = msb->regs.extra_data; in msb_read_oob()
976 static int msb_verify_block(struct msb_data *msb, u16 pba, in msb_verify_block() argument
982 sg_init_one(&sg, msb->block_buffer, msb->block_size); in msb_verify_block()
984 while (page < msb->pages_in_block) { in msb_verify_block()
986 error = msb_read_page(msb, pba, page, in msb_verify_block()
987 NULL, &sg, page * msb->page_size); in msb_verify_block()
994 msb->block_buffer, msb->block_size)) in msb_verify_block()
1000 static int msb_write_block(struct msb_data *msb, in msb_write_block() argument
1004 BUG_ON(sg->length < msb->page_size); in msb_write_block()
1006 if (msb->read_only) in msb_write_block()
1015 if (pba >= msb->block_count || lba >= msb->logical_block_count) { in msb_write_block()
1026 if (pba == msb->boot_block_locations[0] || in msb_write_block()
1027 pba == msb->boot_block_locations[1]) { in msb_write_block()
1034 if (msb->read_only) in msb_write_block()
1037 msb->regs.param.cp = MEMSTICK_CP_BLOCK; in msb_write_block()
1038 msb->regs.param.page_address = 0; in msb_write_block()
1039 msb->regs.param.block_address = cpu_to_be16(pba); in msb_write_block()
1041 msb->regs.extra_data.management_flag = 0xFF; in msb_write_block()
1042 msb->regs.extra_data.overwrite_flag = 0xF8; in msb_write_block()
1043 msb->regs.extra_data.logical_address = cpu_to_be16(lba); in msb_write_block()
1045 msb->current_sg = sg; in msb_write_block()
1046 msb->current_sg_offset = offset; in msb_write_block()
1047 msb->current_page = 0; in msb_write_block()
1049 error = msb_run_state_machine(msb, h_msb_write_block); in msb_write_block()
1058 !test_bit(pba, msb->erased_blocks_bitmap))) in msb_write_block()
1059 error = msb_verify_block(msb, pba, sg, offset); in msb_write_block()
1064 if (current_try > 1 || msb_reset(msb, true)) in msb_write_block()
1068 error = msb_erase_block(msb, pba); in msb_write_block()
1078 static u16 msb_get_free_block(struct msb_data *msb, int zone) in msb_get_free_block() argument
1086 if (!msb->free_block_count[zone]) { in msb_get_free_block()
1088 msb->read_only = true; in msb_get_free_block()
1092 pos %= msb->free_block_count[zone]; in msb_get_free_block()
1095 msb->free_block_count[zone], pos); in msb_get_free_block()
1097 pba = find_next_zero_bit(msb->used_blocks_bitmap, in msb_get_free_block()
1098 msb->block_count, pba); in msb_get_free_block()
1100 pba = find_next_zero_bit(msb->used_blocks_bitmap, in msb_get_free_block()
1101 msb->block_count, pba + 1); in msb_get_free_block()
1105 if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) { in msb_get_free_block()
1107 msb->read_only = true; in msb_get_free_block()
1111 msb_mark_block_used(msb, pba); in msb_get_free_block()
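
msb_get_free_block() wear-levels by picking a random position among the zone's free blocks (`pos %= msb->free_block_count[zone]`) and then walking the used-blocks bitmap with find_next_zero_bit() until that many free blocks have been skipped, going read-only if the walk runs off the zone. An equivalent walk over a plain bitmap (sizes and names are illustrative):

#include <stdlib.h>

#define BLOCK_COUNT 1024
#define ZONE_BLOCKS 512
#define WORD_BITS   (8 * (int)sizeof(unsigned long))

static int test_used_bit(const unsigned long *used, int pba)
{
    return (used[pba / WORD_BITS] >> (pba % WORD_BITS)) & 1;
}

/* Return the pba of a randomly chosen free block inside the zone, or -1. */
static int pick_free_block(const unsigned long *used, int zone,
                           int zone_free_count)
{
    int pba, pos;

    if (!zone_free_count)
        return -1;                          /* zone exhausted */

    pos = rand() % zone_free_count;         /* random slot for wear leveling */

    for (pba = zone * ZONE_BLOCKS; pba < BLOCK_COUNT; pba++) {
        if (test_used_bit(used, pba))
            continue;
        if (pos-- == 0)
            break;                          /* this is the chosen free block */
    }

    if (pba >= (zone + 1) * ZONE_BLOCKS || pba >= BLOCK_COUNT)
        return -1;                          /* counters and bitmap disagree */
    return pba;
}
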
1115 static int msb_update_block(struct msb_data *msb, u16 lba, in msb_update_block() argument
1121 pba = msb->lba_to_pba_table[lba]; in msb_update_block()
1126 msb_set_overwrite_flag(msb, pba, 0, in msb_update_block()
1131 new_pba = msb_get_free_block(msb, in msb_update_block()
1141 error = msb_write_block(msb, new_pba, lba, sg, offset); in msb_update_block()
1143 msb_mark_bad(msb, new_pba); in msb_update_block()
1151 msb_erase_block(msb, pba); in msb_update_block()
1152 msb->lba_to_pba_table[lba] = new_pba; in msb_update_block()
1158 msb->read_only = true; in msb_update_block()
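
msb_update_block() never rewrites a logical block in place: it touches the old physical block's overwrite flag, allocates a fresh free block, writes the new data there (marking the new block bad if the write fails), and only after a successful write erases the old block and repoints lba_to_pba_table. That ordering is what keeps one valid copy on the media at all times. A compact sketch of the sequence, with every card operation replaced by a stand-in:

#include <stddef.h>

#define BLOCK_INVALID 0xFFFF

/* Stand-ins for the real operations. */
static int  alloc_free_block(int zone)                 { (void)zone; return 42; }
static int  write_block(int pba, int lba,
                        const void *data, size_t len)  { (void)pba; (void)lba;
                                                         (void)data; (void)len; return 0; }
static void erase_block_pba(int pba)                   { (void)pba; }
static void mark_block_bad(int pba)                    { (void)pba; }

/* lba_to_pba[lba] holds the current physical block of each logical block. */
static int update_block(unsigned short *lba_to_pba, int lba, int zone,
                        const void *data, size_t len)
{
    int old_pba = lba_to_pba[lba];
    int new_pba = alloc_free_block(zone);
    int error;

    if (new_pba == BLOCK_INVALID)
        return -1;                       /* out of free blocks */

    error = write_block(new_pba, lba, data, len);
    if (error) {
        mark_block_bad(new_pba);         /* new copy is unusable */
        return error;
    }

    /* Only now is the old copy redundant. */
    if (old_pba != BLOCK_INVALID)
        erase_block_pba(old_pba);
    lba_to_pba[lba] = new_pba;
    return 0;
}
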
1190 static int msb_read_boot_blocks(struct msb_data *msb) in msb_read_boot_blocks() argument
1197 msb->boot_block_locations[0] = MS_BLOCK_INVALID; in msb_read_boot_blocks()
1198 msb->boot_block_locations[1] = MS_BLOCK_INVALID; in msb_read_boot_blocks()
1199 msb->boot_block_count = 0; in msb_read_boot_blocks()
1203 if (!msb->boot_page) { in msb_read_boot_blocks()
1208 msb->boot_page = page; in msb_read_boot_blocks()
1210 page = msb->boot_page; in msb_read_boot_blocks()
1212 msb->block_count = MS_BLOCK_MAX_BOOT_ADDR; in msb_read_boot_blocks()
1217 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) { in msb_read_boot_blocks()
1234 msb->boot_block_locations[msb->boot_block_count] = pba; in msb_read_boot_blocks()
1237 msb->boot_block_count++; in msb_read_boot_blocks()
1239 if (msb->boot_block_count == 2) in msb_read_boot_blocks()
1243 if (!msb->boot_block_count) { in msb_read_boot_blocks()
1252 static int msb_read_bad_block_table(struct msb_data *msb, int block_nr) in msb_read_bad_block_table() argument
1263 boot_block = &msb->boot_page[block_nr]; in msb_read_bad_block_table()
1264 pba = msb->boot_block_locations[block_nr]; in msb_read_bad_block_table()
1266 if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID) in msb_read_bad_block_table()
1275 page = data_offset / msb->page_size; in msb_read_bad_block_table()
1276 page_offset = data_offset % msb->page_size; in msb_read_bad_block_table()
1278 DIV_ROUND_UP(data_size + page_offset, msb->page_size) * in msb_read_bad_block_table()
1279 msb->page_size; in msb_read_bad_block_table()
1292 error = msb_read_page(msb, pba, page, NULL, &sg, offset); in msb_read_bad_block_table()
1297 offset += msb->page_size; in msb_read_bad_block_table()
1299 if (page == msb->pages_in_block) { in msb_read_bad_block_table()
1311 if (bad_block >= msb->block_count) { in msb_read_bad_block_table()
1317 if (test_bit(bad_block, msb->used_blocks_bitmap)) { in msb_read_bad_block_table()
1324 msb_mark_block_used(msb, bad_block); in msb_read_bad_block_table()
1331 static int msb_ftl_initialize(struct msb_data *msb) in msb_ftl_initialize() argument
1335 if (msb->ftl_initialized) in msb_ftl_initialize()
1338 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE; in msb_ftl_initialize()
1339 msb->logical_block_count = msb->zone_count * 496 - 2; in msb_ftl_initialize()
1341 msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL); in msb_ftl_initialize()
1342 msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL); in msb_ftl_initialize()
1343 msb->lba_to_pba_table = in msb_ftl_initialize()
1344 kmalloc(msb->logical_block_count * sizeof(u16), GFP_KERNEL); in msb_ftl_initialize()
1346 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table || in msb_ftl_initialize()
1347 !msb->erased_blocks_bitmap) { in msb_ftl_initialize()
1348 kfree(msb->used_blocks_bitmap); in msb_ftl_initialize()
1349 kfree(msb->lba_to_pba_table); in msb_ftl_initialize()
1350 kfree(msb->erased_blocks_bitmap); in msb_ftl_initialize()
1354 for (i = 0; i < msb->zone_count; i++) in msb_ftl_initialize()
1355 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE; in msb_ftl_initialize()
1357 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID, in msb_ftl_initialize()
1358 msb->logical_block_count * sizeof(u16)); in msb_ftl_initialize()
1361 msb->zone_count, msb->logical_block_count); in msb_ftl_initialize()
1363 msb->ftl_initialized = true; in msb_ftl_initialize()
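
msb_ftl_initialize() derives everything from the physical block count: the card is split into zones of MS_BLOCKS_IN_ZONE blocks, logical_block_count = zone_count * 496 - 2, one bit per physical block for the used and erased bitmaps (block_count / 8 bytes each), and one 16-bit LBA→PBA entry per logical block preset to the invalid marker. The same arithmetic as a standalone sketch; the 512-blocks-per-zone figure is an assumption standing in for MS_BLOCKS_IN_ZONE, whose value the listing does not show:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define BLOCKS_IN_ZONE   512     /* assumed MS_BLOCKS_IN_ZONE */
#define LOGICAL_PER_ZONE 496     /* from the listing: zone_count * 496 - 2 */

struct ftl {
    int zone_count;
    int logical_block_count;
    unsigned char *used_blocks_bitmap;
    unsigned char *erased_blocks_bitmap;
    uint16_t *lba_to_pba_table;
};

static int ftl_initialize(struct ftl *ftl, int block_count)
{
    ftl->zone_count = block_count / BLOCKS_IN_ZONE;
    ftl->logical_block_count = ftl->zone_count * LOGICAL_PER_ZONE - 2;

    /* one bit per physical block */
    ftl->used_blocks_bitmap   = calloc(1, block_count / 8);
    ftl->erased_blocks_bitmap = calloc(1, block_count / 8);
    /* one 16-bit physical address per logical block */
    ftl->lba_to_pba_table =
        malloc(ftl->logical_block_count * sizeof(uint16_t));

    if (!ftl->used_blocks_bitmap || !ftl->erased_blocks_bitmap ||
        !ftl->lba_to_pba_table) {
        free(ftl->used_blocks_bitmap);
        free(ftl->erased_blocks_bitmap);
        free(ftl->lba_to_pba_table);
        return -1;
    }

    /* every logical block starts unmapped; 0xFF bytes give 0xFFFF entries */
    memset(ftl->lba_to_pba_table, 0xFF,
           ftl->logical_block_count * sizeof(uint16_t));
    return 0;
}
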
1367 static int msb_ftl_scan(struct msb_data *msb) in msb_ftl_scan() argument
1373 u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL); in msb_ftl_scan()
1379 for (pba = 0; pba < msb->block_count; pba++) { in msb_ftl_scan()
1381 if (pba == msb->boot_block_locations[0] || in msb_ftl_scan()
1382 pba == msb->boot_block_locations[1]) { in msb_ftl_scan()
1384 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1388 if (test_bit(pba, msb->used_blocks_bitmap)) { in msb_ftl_scan()
1394 error = msb_read_oob(msb, pba, 0, &extra); in msb_ftl_scan()
1400 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1401 msb_erase_block(msb, pba); in msb_ftl_scan()
1419 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1428 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1436 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1437 msb_erase_block(msb, pba); in msb_ftl_scan()
1446 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1452 msb_erase_block(msb, pba); in msb_ftl_scan()
1457 if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) { in msb_ftl_scan()
1459 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
1463 other_block = msb->lba_to_pba_table[lba]; in msb_ftl_scan()
1471 msb_erase_block(msb, other_block); in msb_ftl_scan()
1472 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
1479 msb_erase_block(msb, pba); in msb_ftl_scan()
1486 msb_erase_block(msb, other_block); in msb_ftl_scan()
1487 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
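
msb_ftl_scan() walks every physical block, reads its OOB area and fills lba_to_pba_table: boot blocks and blocks with suspicious management/overwrite flags are marked used (and in several branches erased), and when two physical blocks claim the same LBA the scan keeps the copy whose flags mark it as current and erases the other. A heavily simplified sketch of just that duplicate-LBA resolution; the real tie-break inspects the overwrite-flag bits, which is abstracted here into a pba_is_newer input:

#include <stdint.h>

#define BLOCK_INVALID 0xFFFF

static void erase_dup_block(int pba) { (void)pba; }

/* Record pba as the backing block of lba; if another block already claims
 * the same lba, keep the newer copy and erase the loser. */
static void record_lba(uint16_t *lba_to_pba, int lba, int pba, int pba_is_newer)
{
    int other = lba_to_pba[lba];

    if (other == BLOCK_INVALID) {        /* first copy seen for this LBA */
        lba_to_pba[lba] = pba;
        return;
    }

    if (pba_is_newer) {
        erase_dup_block(other);
        lba_to_pba[lba] = pba;
    } else {
        erase_dup_block(pba);
    }
}
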
1497 struct msb_data *msb = (struct msb_data *)data; in msb_cache_flush_timer() local
1498 msb->need_flush_cache = true; in msb_cache_flush_timer()
1499 queue_work(msb->io_queue, &msb->io_work); in msb_cache_flush_timer()
1503 static void msb_cache_discard(struct msb_data *msb) in msb_cache_discard() argument
1505 if (msb->cache_block_lba == MS_BLOCK_INVALID) in msb_cache_discard()
1508 del_timer_sync(&msb->cache_flush_timer); in msb_cache_discard()
1511 msb->cache_block_lba = MS_BLOCK_INVALID; in msb_cache_discard()
1512 bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block); in msb_cache_discard()
1515 static int msb_cache_init(struct msb_data *msb) in msb_cache_init() argument
1517 setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer, in msb_cache_init()
1518 (unsigned long)msb); in msb_cache_init()
1520 if (!msb->cache) in msb_cache_init()
1521 msb->cache = kzalloc(msb->block_size, GFP_KERNEL); in msb_cache_init()
1522 if (!msb->cache) in msb_cache_init()
1525 msb_cache_discard(msb); in msb_cache_init()
1529 static int msb_cache_flush(struct msb_data *msb) in msb_cache_flush() argument
1536 if (msb->read_only) in msb_cache_flush()
1539 if (msb->cache_block_lba == MS_BLOCK_INVALID) in msb_cache_flush()
1542 lba = msb->cache_block_lba; in msb_cache_flush()
1543 pba = msb->lba_to_pba_table[lba]; in msb_cache_flush()
1546 pba, msb->cache_block_lba); in msb_cache_flush()
1548 sg_init_one(&sg, msb->cache , msb->block_size); in msb_cache_flush()
1551 for (page = 0; page < msb->pages_in_block; page++) { in msb_cache_flush()
1553 if (test_bit(page, &msb->valid_cache_bitmap)) in msb_cache_flush()
1556 offset = page * msb->page_size; in msb_cache_flush()
1560 error = msb_read_page(msb, pba, page, &extra, &sg, offset); in msb_cache_flush()
1577 set_bit(page, &msb->valid_cache_bitmap); in msb_cache_flush()
1581 error = msb_update_block(msb, msb->cache_block_lba, &sg, 0); in msb_cache_flush()
1582 pba = msb->lba_to_pba_table[msb->cache_block_lba]; in msb_cache_flush()
1586 for (page = 0; page < msb->pages_in_block; page++) { in msb_cache_flush()
1588 if (test_bit(page, &msb->valid_cache_bitmap)) in msb_cache_flush()
1593 msb_set_overwrite_flag(msb, in msb_cache_flush()
1598 msb_cache_discard(msb); in msb_cache_flush()
1602 static int msb_cache_write(struct msb_data *msb, int lba, in msb_cache_write() argument
1608 if (msb->read_only) in msb_cache_write()
1611 if (msb->cache_block_lba == MS_BLOCK_INVALID || in msb_cache_write()
1612 lba != msb->cache_block_lba) in msb_cache_write()
1617 if (msb->cache_block_lba != MS_BLOCK_INVALID && in msb_cache_write()
1618 lba != msb->cache_block_lba) { in msb_cache_write()
1620 error = msb_cache_flush(msb); in msb_cache_write()
1625 if (msb->cache_block_lba == MS_BLOCK_INVALID) { in msb_cache_write()
1626 msb->cache_block_lba = lba; in msb_cache_write()
1627 mod_timer(&msb->cache_flush_timer, in msb_cache_write()
1634 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size); in msb_cache_write()
1637 msb->cache + page * msb->page_size, msb->page_size); in msb_cache_write()
1639 set_bit(page, &msb->valid_cache_bitmap); in msb_cache_write()
1643 static int msb_cache_read(struct msb_data *msb, int lba, in msb_cache_read() argument
1646 int pba = msb->lba_to_pba_table[lba]; in msb_cache_read()
1650 if (lba == msb->cache_block_lba && in msb_cache_read()
1651 test_bit(page, &msb->valid_cache_bitmap)) { in msb_cache_read()
1658 offset, msb->page_size); in msb_cache_read()
1660 msb->cache + msb->page_size * page, in msb_cache_read()
1661 msb->page_size); in msb_cache_read()
1666 error = msb_read_page(msb, pba, page, NULL, sg, offset); in msb_cache_read()
1670 msb_cache_write(msb, lba, page, true, sg, offset); in msb_cache_read()
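
The cache layer keeps one block's worth of pages in RAM for a single LBA (cache_block_lba) plus a bitmap of which pages are valid; a write aimed at a different LBA flushes the cache first, and a read is served from the cache only when that page's bit is set, otherwise it comes from flash and is then cached. A reduced userspace sketch of that one-block, page-granular write-back cache (the single-block policy follows the listing, the sizes are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define PAGES_IN_BLOCK 32
#define PAGE_BYTES     512
#define LBA_INVALID    (-1)

struct block_cache {
    int      lba;                        /* LBA_INVALID when empty */
    uint32_t valid;                      /* one bit per cached page */
    uint8_t  data[PAGES_IN_BLOCK * PAGE_BYTES];
};

static void cache_discard(struct block_cache *c)
{
    c->lba = LBA_INVALID;
    c->valid = 0;
}

/* Flush is a stand-in; the driver merges and writes the block out here. */
static int cache_flush(struct block_cache *c) { cache_discard(c); return 0; }

static int cache_write(struct block_cache *c, int lba, int page,
                       const uint8_t *buf)
{
    if (c->lba != LBA_INVALID && c->lba != lba)
        cache_flush(c);                  /* cache holds a different block */
    if (c->lba == LBA_INVALID)
        c->lba = lba;                    /* adopt this block */

    memcpy(&c->data[page * PAGE_BYTES], buf, PAGE_BYTES);
    c->valid |= 1u << page;
    return 0;
}

/* Returns true when the page was served from the cache. */
static bool cache_read(struct block_cache *c, int lba, int page, uint8_t *buf)
{
    if (lba != c->lba || !(c->valid & (1u << page)))
        return false;                    /* caller must read from flash */
    memcpy(buf, &c->data[page * PAGE_BYTES], PAGE_BYTES);
    return true;
}
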
1695 struct msb_data *msb = memstick_get_drvdata(card); in msb_init_card() local
1700 msb->caps = 0; in msb_init_card()
1704 msb->read_only = true; in msb_init_card()
1706 msb->state = -1; in msb_init_card()
1707 error = msb_reset(msb, false); in msb_init_card()
1715 msb_switch_to_parallel(msb); in msb_init_card()
1717 msb->page_size = sizeof(struct ms_boot_page); in msb_init_card()
1720 error = msb_read_boot_blocks(msb); in msb_init_card()
1724 boot_block = &msb->boot_page[0]; in msb_init_card()
1727 msb->block_count = boot_block->attr.number_of_blocks; in msb_init_card()
1728 msb->page_size = boot_block->attr.page_size; in msb_init_card()
1730 msb->pages_in_block = boot_block->attr.block_size * 2; in msb_init_card()
1731 msb->block_size = msb->page_size * msb->pages_in_block; in msb_init_card()
1733 if (msb->page_size > PAGE_SIZE) { in msb_init_card()
1735 dbg("device page %d size isn't supported", msb->page_size); in msb_init_card()
1739 msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL); in msb_init_card()
1740 if (!msb->block_buffer) in msb_init_card()
1743 raw_size_in_megs = (msb->block_size * msb->block_count) >> 20; in msb_init_card()
1750 msb->geometry.cylinders = chs_table[i].cyl; in msb_init_card()
1751 msb->geometry.heads = chs_table[i].head; in msb_init_card()
1752 msb->geometry.sectors = chs_table[i].sec; in msb_init_card()
1757 msb->caps |= MEMSTICK_CAP_PAR4; in msb_init_card()
1760 msb->read_only = true; in msb_init_card()
1762 dbg("Total block count = %d", msb->block_count); in msb_init_card()
1763 dbg("Each block consists of %d pages", msb->pages_in_block); in msb_init_card()
1764 dbg("Page size = %d bytes", msb->page_size); in msb_init_card()
1765 dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4)); in msb_init_card()
1766 dbg("Read only: %d", msb->read_only); in msb_init_card()
1770 if (host->caps & msb->caps & MEMSTICK_CAP_PAR4) in msb_init_card()
1771 msb_switch_to_parallel(msb); in msb_init_card()
1774 error = msb_cache_init(msb); in msb_init_card()
1778 error = msb_ftl_initialize(msb); in msb_init_card()
1784 error = msb_read_bad_block_table(msb, 0); in msb_init_card()
1788 error = msb_read_bad_block_table(msb, 1); in msb_init_card()
1795 error = msb_ftl_scan(msb); in msb_init_card()
1805 static int msb_do_write_request(struct msb_data *msb, int lba, in msb_do_write_request() argument
1813 if (page == 0 && len - offset >= msb->block_size) { in msb_do_write_request()
1815 if (msb->cache_block_lba == lba) in msb_do_write_request()
1816 msb_cache_discard(msb); in msb_do_write_request()
1819 error = msb_update_block(msb, lba, sg, offset); in msb_do_write_request()
1823 offset += msb->block_size; in msb_do_write_request()
1824 *sucessfuly_written += msb->block_size; in msb_do_write_request()
1829 error = msb_cache_write(msb, lba, page, false, sg, offset); in msb_do_write_request()
1833 offset += msb->page_size; in msb_do_write_request()
1834 *sucessfuly_written += msb->page_size; in msb_do_write_request()
1837 if (page == msb->pages_in_block) { in msb_do_write_request()
1845 static int msb_do_read_request(struct msb_data *msb, int lba, in msb_do_read_request() argument
1854 error = msb_cache_read(msb, lba, page, sg, offset); in msb_do_read_request()
1858 offset += msb->page_size; in msb_do_read_request()
1859 *sucessfuly_read += msb->page_size; in msb_do_read_request()
1862 if (page == msb->pages_in_block) { in msb_do_read_request()
1872 struct msb_data *msb = container_of(work, struct msb_data, io_work); in msb_io_work() local
1876 struct scatterlist *sg = msb->prealloc_sg; in msb_io_work()
1881 spin_lock_irqsave(&msb->q_lock, flags); in msb_io_work()
1883 if (msb->need_flush_cache) { in msb_io_work()
1884 msb->need_flush_cache = false; in msb_io_work()
1885 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_io_work()
1886 msb_cache_flush(msb); in msb_io_work()
1890 if (!msb->req) { in msb_io_work()
1891 msb->req = blk_fetch_request(msb->queue); in msb_io_work()
1892 if (!msb->req) { in msb_io_work()
1894 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_io_work()
1899 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_io_work()
1902 if (!msb->req) in msb_io_work()
1907 blk_rq_map_sg(msb->queue, msb->req, sg); in msb_io_work()
1909 lba = blk_rq_pos(msb->req); in msb_io_work()
1911 sector_div(lba, msb->page_size / 512); in msb_io_work()
1912 page = do_div(lba, msb->pages_in_block); in msb_io_work()
1914 if (rq_data_dir(msb->req) == READ) in msb_io_work()
1915 error = msb_do_read_request(msb, lba, page, sg, in msb_io_work()
1916 blk_rq_bytes(msb->req), &len); in msb_io_work()
1918 error = msb_do_write_request(msb, lba, page, sg, in msb_io_work()
1919 blk_rq_bytes(msb->req), &len); in msb_io_work()
1921 spin_lock_irqsave(&msb->q_lock, flags); in msb_io_work()
1924 if (!__blk_end_request(msb->req, 0, len)) in msb_io_work()
1925 msb->req = NULL; in msb_io_work()
1927 if (error && msb->req) { in msb_io_work()
1929 if (!__blk_end_request(msb->req, error, msb->page_size)) in msb_io_work()
1930 msb->req = NULL; in msb_io_work()
1933 if (msb->req) in msb_io_work()
1936 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_io_work()
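
msb_io_work() converts the request's 512-byte sector position into FTL coordinates: sector_div(lba, msb->page_size / 512) turns sectors into pages, and do_div(lba, msb->pages_in_block) splits that page index into the logical block and the page inside it. The same arithmetic in plain C; the geometry constants are placeholders chosen only so both divisions do visible work:

#include <stdint.h>
#include <stdio.h>

/* Placeholders for what the boot block reports. */
#define SECTOR_SIZE    512
#define PAGE_BYTES     2048
#define PAGES_IN_BLOCK 16

static void sector_to_lba_page(uint64_t sector, uint64_t *lba, unsigned *page)
{
    uint64_t page_index = sector / (PAGE_BYTES / SECTOR_SIZE);

    *lba  = page_index / PAGES_IN_BLOCK;   /* logical block */
    *page = page_index % PAGES_IN_BLOCK;   /* page inside that block */
}

int main(void)
{
    uint64_t lba;
    unsigned page;

    sector_to_lba_page(100, &lba, &page);
    printf("sector 100 -> lba %llu, page %u\n",
           (unsigned long long)lba, page);
    return 0;
}
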
1946 struct msb_data *msb = disk->private_data; in msb_bd_open() local
1952 if (msb && msb->card) in msb_bd_open()
1953 msb->usage_count++; in msb_bd_open()
1959 static void msb_data_clear(struct msb_data *msb) in msb_data_clear() argument
1961 kfree(msb->boot_page); in msb_data_clear()
1962 kfree(msb->used_blocks_bitmap); in msb_data_clear()
1963 kfree(msb->lba_to_pba_table); in msb_data_clear()
1964 kfree(msb->cache); in msb_data_clear()
1965 msb->card = NULL; in msb_data_clear()
1970 struct msb_data *msb = disk->private_data; in msb_disk_release() local
1975 if (msb) { in msb_disk_release()
1976 if (msb->usage_count) in msb_disk_release()
1977 msb->usage_count--; in msb_disk_release()
1979 if (!msb->usage_count) { in msb_disk_release()
1981 idr_remove(&msb_disk_idr, msb->disk_id); in msb_disk_release()
1983 kfree(msb); in msb_disk_release()
1998 struct msb_data *msb = bdev->bd_disk->private_data; in msb_bd_getgeo() local
1999 *geo = msb->geometry; in msb_bd_getgeo()
2017 struct msb_data *msb = memstick_get_drvdata(card); in msb_submit_req() local
2022 if (msb->card_dead) { in msb_submit_req()
2025 WARN_ON(!msb->io_queue_stopped); in msb_submit_req()
2032 if (msb->req) in msb_submit_req()
2035 if (!msb->io_queue_stopped) in msb_submit_req()
2036 queue_work(msb->io_queue, &msb->io_work); in msb_submit_req()
2041 struct msb_data *msb = memstick_get_drvdata(card); in msb_check_card() local
2042 return (msb->card_dead == 0); in msb_check_card()
2047 struct msb_data *msb = memstick_get_drvdata(card); in msb_stop() local
2052 spin_lock_irqsave(&msb->q_lock, flags); in msb_stop()
2053 blk_stop_queue(msb->queue); in msb_stop()
2054 msb->io_queue_stopped = true; in msb_stop()
2055 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_stop()
2057 del_timer_sync(&msb->cache_flush_timer); in msb_stop()
2058 flush_workqueue(msb->io_queue); in msb_stop()
2060 if (msb->req) { in msb_stop()
2061 spin_lock_irqsave(&msb->q_lock, flags); in msb_stop()
2062 blk_requeue_request(msb->queue, msb->req); in msb_stop()
2063 msb->req = NULL; in msb_stop()
2064 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_stop()
2071 struct msb_data *msb = memstick_get_drvdata(card); in msb_start() local
2076 msb_invalidate_reg_window(msb); in msb_start()
2078 spin_lock_irqsave(&msb->q_lock, flags); in msb_start()
2079 if (!msb->io_queue_stopped || msb->card_dead) { in msb_start()
2080 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_start()
2083 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_start()
2086 msb->need_flush_cache = true; in msb_start()
2087 msb->io_queue_stopped = false; in msb_start()
2089 spin_lock_irqsave(&msb->q_lock, flags); in msb_start()
2090 blk_start_queue(msb->queue); in msb_start()
2091 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_start()
2093 queue_work(msb->io_queue, &msb->io_work); in msb_start()
2107 struct msb_data *msb = memstick_get_drvdata(card); in msb_init_disk() local
2117 msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL); in msb_init_disk()
2120 if (msb->disk_id < 0) in msb_init_disk()
2121 return msb->disk_id; in msb_init_disk()
2123 msb->disk = alloc_disk(0); in msb_init_disk()
2124 if (!msb->disk) { in msb_init_disk()
2129 msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock); in msb_init_disk()
2130 if (!msb->queue) { in msb_init_disk()
2135 msb->queue->queuedata = card; in msb_init_disk()
2136 blk_queue_prep_rq(msb->queue, msb_prepare_req); in msb_init_disk()
2138 blk_queue_bounce_limit(msb->queue, limit); in msb_init_disk()
2139 blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES); in msb_init_disk()
2140 blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS); in msb_init_disk()
2141 blk_queue_max_segment_size(msb->queue, in msb_init_disk()
2142 MS_BLOCK_MAX_PAGES * msb->page_size); in msb_init_disk()
2143 blk_queue_logical_block_size(msb->queue, msb->page_size); in msb_init_disk()
2145 sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id); in msb_init_disk()
2146 msb->disk->fops = &msb_bdops; in msb_init_disk()
2147 msb->disk->private_data = msb; in msb_init_disk()
2148 msb->disk->queue = msb->queue; in msb_init_disk()
2149 msb->disk->driverfs_dev = &card->dev; in msb_init_disk()
2150 msb->disk->flags |= GENHD_FL_EXT_DEVT; in msb_init_disk()
2152 capacity = msb->pages_in_block * msb->logical_block_count; in msb_init_disk()
2153 capacity *= (msb->page_size / 512); in msb_init_disk()
2154 set_capacity(msb->disk, capacity); in msb_init_disk()
2157 msb->usage_count = 1; in msb_init_disk()
2158 msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM); in msb_init_disk()
2159 INIT_WORK(&msb->io_work, msb_io_work); in msb_init_disk()
2160 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); in msb_init_disk()
2162 if (msb->read_only) in msb_init_disk()
2163 set_disk_ro(msb->disk, 1); in msb_init_disk()
2166 add_disk(msb->disk); in msb_init_disk()
2171 put_disk(msb->disk); in msb_init_disk()
2174 idr_remove(&msb_disk_idr, msb->disk_id); in msb_init_disk()
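
The capacity handed to set_capacity() in this group is the inverse of the io_work mapping: logical blocks times pages per block times page_size / 512, because the block layer counts in 512-byte sectors. As a one-line sketch of the formula (not the driver's code):

#include <stdint.h>

/* Capacity in 512-byte sectors: logical blocks * pages per block *
 * sectors per page, mirroring the two capacity lines in the listing. */
static uint64_t disk_sectors(uint64_t logical_blocks, unsigned pages_in_block,
                             unsigned page_size)
{
    return logical_blocks * pages_in_block * (page_size / 512);
}
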
2181 struct msb_data *msb; in msb_probe() local
2184 msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL); in msb_probe()
2185 if (!msb) in msb_probe()
2187 memstick_set_drvdata(card, msb); in msb_probe()
2188 msb->card = card; in msb_probe()
2189 spin_lock_init(&msb->q_lock); in msb_probe()
2204 msb_data_clear(msb); in msb_probe()
2205 kfree(msb); in msb_probe()
2211 struct msb_data *msb = memstick_get_drvdata(card); in msb_remove() local
2214 if (!msb->io_queue_stopped) in msb_remove()
2220 spin_lock_irqsave(&msb->q_lock, flags); in msb_remove()
2221 msb->card_dead = true; in msb_remove()
2222 blk_start_queue(msb->queue); in msb_remove()
2223 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_remove()
2226 del_gendisk(msb->disk); in msb_remove()
2227 blk_cleanup_queue(msb->queue); in msb_remove()
2228 msb->queue = NULL; in msb_remove()
2231 msb_data_clear(msb); in msb_remove()
2234 msb_disk_release(msb->disk); in msb_remove()
2248 struct msb_data *msb = memstick_get_drvdata(card); in msb_resume() local
2253 msb->card_dead = true; in msb_resume()
2265 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); in msb_resume()
2270 if (msb->block_size != new_msb->block_size) in msb_resume()
2273 if (memcmp(msb->boot_page, new_msb->boot_page, in msb_resume()
2277 if (msb->logical_block_count != new_msb->logical_block_count || in msb_resume()
2278 memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table, in msb_resume()
2279 msb->logical_block_count)) in msb_resume()
2282 if (msb->block_count != new_msb->block_count || in msb_resume()
2283 memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap, in msb_resume()
2284 msb->block_count / 8)) in msb_resume()
2292 msb->card_dead = card_dead; in msb_resume()
2293 memstick_set_drvdata(card, msb); in msb_resume()