Searched refs:sector_div (Results 1 - 46 of 46) sorted by relevance

/linux-4.1.27/drivers/md/
dm-stripe.c
120 if (sector_div(width, stripes)) { stripe_ctr()
127 if (sector_div(tmp_len, chunk_size)) { stripe_ctr()
218 chunk_offset = sector_div(chunk, sc->chunk_size); stripe_map_sector()
225 *stripe = sector_div(chunk, sc->stripes); stripe_map_sector()
251 *result -= sector_div(sector, sc->chunk_size); stripe_map_range_sector()
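
Note: the dm-stripe hits show the canonical two-step mapping: a first sector_div() peels off the offset within a chunk, and a second turns the chunk number into a stripe index plus a per-stripe chunk number. A minimal userspace sketch of that arithmetic (the values and the sector_div stand-in are illustrative, not the dm-stripe internals):

    #include <stdio.h>

    typedef unsigned long long sector_t;
    /* Userspace stand-in for the kernel macro: n /= b in place, remainder returned. */
    #define sector_div(n, b) ({ sector_t _rem = (n) % (b); (n) /= (b); _rem; })

    int main(void)
    {
        sector_t chunk = 1000003;                       /* hypothetical sector */
        sector_t chunk_offset = sector_div(chunk, 128); /* offset inside chunk */
        unsigned int stripe = (unsigned int)sector_div(chunk, 4); /* stripe device */

        printf("stripe=%u chunk=%llu offset=%llu\n", stripe, chunk, chunk_offset);
        return 0;
    }
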
raid0.c
98 sector_div(sectors, mddev->chunk_sectors); rdev_for_each()
324 sector_div(chunk, zone->nb_dev << chunksect_bits); map_sector()
326 sect_in_chunk = sector_div(sector, chunk_sects); map_sector()
328 sector_div(chunk, chunk_sects * zone->nb_dev); map_sector()
337 + sector_div(sector, zone->nb_dev)]; map_sector()
366 max = (chunk_sectors - (sector_div(sector, chunk_sectors) raid0_mergeable_bvec()
505 return chunk_sects >= (sector_div(sector, chunk_sects) is_io_in_chunk_boundary()
528 : sector_div(sector, chunk_sects)); raid0_make_request()
530 /* Restore due to sector_div */ raid0_make_request()
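
Note: the comment at raid0.c:530 ("Restore due to sector_div") flags the key gotcha running through all of these results: sector_div() overwrites its first argument with the quotient, so a caller that still needs the original sector must divide a copy or restore the value afterwards. A hedged illustration of the copy idiom (userspace model, not the raid0 code):

    #include <stdio.h>

    typedef unsigned long long sector_t;
    #define sector_div(n, b) ({ sector_t _rem = (n) % (b); (n) /= (b); _rem; })

    int main(void)
    {
        sector_t sector = 12345;        /* still needed after the division */
        sector_t tmp = sector;          /* so divide a copy instead        */
        unsigned int in_chunk = (unsigned int)sector_div(tmp, 64);

        printf("sector=%llu (intact), offset in chunk=%u\n", sector, in_chunk);
        return 0;
    }
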
dm-switch.c
99 if (sector_div(nr_regions, sctx->region_size)) alloc_region_table()
109 if (sector_div(nr_slots, sctx->region_entries_per_slot)) alloc_region_table()
163 sector_div(p, sctx->region_size); switch_get_path_nr()
linear.c
154 sector_div(sectors, mddev->chunk_sectors); rdev_for_each()
raid5.c
768 if (!sector_div(tmp_sec, conf->chunk_sectors)) stripe_add_to_batch_list()
2559 chunk_offset = sector_div(r_sector, sectors_per_chunk); raid5_compute_sector()
2566 *dd_idx = sector_div(stripe, data_disks); raid5_compute_sector()
2579 pd_idx = data_disks - sector_div(stripe2, raid_disks); raid5_compute_sector()
2584 pd_idx = sector_div(stripe2, raid_disks); raid5_compute_sector()
2589 pd_idx = data_disks - sector_div(stripe2, raid_disks); raid5_compute_sector()
2593 pd_idx = sector_div(stripe2, raid_disks); raid5_compute_sector()
2611 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); raid5_compute_sector()
2620 pd_idx = sector_div(stripe2, raid_disks); raid5_compute_sector()
2629 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); raid5_compute_sector()
2634 pd_idx = sector_div(stripe2, raid_disks); raid5_compute_sector()
2653 pd_idx = sector_div(stripe2, raid_disks); raid5_compute_sector()
2669 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); raid5_compute_sector()
2681 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); raid5_compute_sector()
2689 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); raid5_compute_sector()
2696 pd_idx = sector_div(stripe2, raid_disks-1); raid5_compute_sector()
2703 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); raid5_compute_sector()
2709 pd_idx = sector_div(stripe2, raid_disks-1); raid5_compute_sector()
2755 chunk_offset = sector_div(new_sector, sectors_per_chunk); compute_blocknr()
3062 int chunk_offset = sector_div(stripe, sectors_per_chunk); stripe_set_idx()
5078 sector_div(last_sector, stripe_sectors); make_discard_request()
5361 sector_div(sector_nr, new_data_disks); reshape_request()
5386 sector_div(writepos, new_data_disks); reshape_request()
5388 sector_div(readpos, data_disks); reshape_request()
5390 sector_div(safepos, data_disks); reshape_request()
6703 if (sector_div(here_new, mddev->new_chunk_sectors *
6712 sector_div(here_old, mddev->chunk_sectors *
7543 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); raid45_takeover_raid0()
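
Note: the raid5.c cluster is almost entirely one function, raid5_compute_sector(), which uses the remainder from sector_div() to rotate the parity disk across the array for each RAID5/6 layout. A toy illustration of one such rotation (modeled on the raid_disks - 1 - sector_div(...) pattern above, not the full layout logic):

    #include <stdio.h>

    typedef unsigned long long sector_t;
    #define sector_div(n, b) ({ sector_t _rem = (n) % (b); (n) /= (b); _rem; })

    int main(void)
    {
        int raid_disks = 5;

        for (sector_t stripe = 0; stripe < 5; stripe++) {
            sector_t stripe2 = stripe;
            int pd_idx = raid_disks - 1 - (int)sector_div(stripe2, raid_disks);
            printf("stripe %llu -> parity on disk %d\n", stripe, pd_idx);
        }
        return 0;
    }
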
raid10.c
574 dev = sector_div(stripe, geo->raid_disks); __raid10_find_phys()
656 fc = sector_div(chunk, geo->far_copies); raid10_find_virt()
671 sector_div(vchunk, geo->near_copies); raid10_find_virt()
3429 sector_div(size, conf->geo.far_copies); raid10_size()
3431 sector_div(size, conf->geo.near_copies); raid10_size()
3444 sector_div(size, conf->geo.far_copies); calc_sectors()
3446 sector_div(size, conf->geo.near_copies); calc_sectors()
3461 sector_div(size, conf->geo.far_copies); calc_sectors()
3880 sector_div(size, devs); raid10_takeover_raid0()
4215 sector_div(s, geo->raid_disks); first_dev_address()
dm-cache-target.c
726 sector_div(bi_sector, cache->sectors_per_block); remap_to_cache()
773 (void) sector_div(block_nr, cache->sectors_per_block); get_bio_block()
2319 (void) sector_div(origin_size, discard_block_size); too_many_discard_blocks()
2895 sector_div(e, li->cache->discard_block_size); set_discard_range()
2941 (void) sector_div(size, cache->sectors_per_block); get_cache_dev_size()
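
Note: the (void) casts here (and again in dm-thin.c below) mark the opposite use: only the in-place quotient is wanted and the returned remainder is deliberately discarded, e.g. when turning a bio's start sector into a block number. A sketch of the idiom (the block size of 8 sectors is an arbitrary example):

    #include <stdio.h>

    typedef unsigned long long sector_t;
    #define sector_div(n, b) ({ sector_t _rem = (n) % (b); (n) /= (b); _rem; })

    int main(void)
    {
        sector_t block_nr = 1023;         /* bio start sector               */
        (void) sector_div(block_nr, 8);   /* remainder deliberately dropped */
        printf("block %llu\n", block_nr); /* -> block 127                   */
        return 0;
    }
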
dm-snap-persistent.c
275 if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS) skip_metadata()
dm-raid.c
717 sector_div(sectors_per_dev, rs->md.raid_disks); parse_raid_params()
723 sector_div(sectors_per_dev, parse_raid_params()
dm-thin.c
558 (void) sector_div(block_nr, pool->sectors_per_block); get_bio_block()
575 sector_div(bi_sector, pool->sectors_per_block); remap()
2417 return !sector_div(block_size, n); is_factor()
2805 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE); get_metadata_dev_size_in_blocks()
3013 (void) sector_div(data_size, pool->sectors_per_block); maybe_resize_data_dev()
4036 (void) sector_div(blocks, pool->sectors_per_block); thin_iterate_devices()
dm-era-target.c
1189 (void) sector_div(block_nr, era->sectors_per_block); get_block()
md.c
4500 if (sector_div(temp, chunk)) max_sync_store()
7104 * u32, as those are the requirements for sector_div. status_resync()
7113 sector_div(res, (u32)((max_sectors>>scale)+1)); status_resync()
7157 sector_div(rt, db/32+1); status_resync()
bitmap.c
582 sector_div(bm_blocks, bitmap_read_sb()
dm.c
1363 max_len = sector_div(offset, ti->max_io_len); max_io_len()
/linux-4.1.27/drivers/scsi/aic7xxx/
aiclib.h
139 /* ugly, ugly sector_div calling convention.. */ aic_sector_div()
140 sector_div(capacity, (heads * sectors)); aic_sector_div()
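
Note: aiclib.h wraps the "ugly" calling convention once so the rest of the driver never touches it: the capacity is passed by value (the caller's variable survives) and the quotient is returned instead of the remainder. A hypothetical wrapper in that spirit (aic_style_div is a made-up name, not the driver's):

    typedef unsigned long long sector_t;
    #define sector_div(n, b) ({ sector_t _rem = (n) % (b); (n) /= (b); _rem; })

    /* Call-by-value in, quotient out; the mutation stays hidden inside. */
    static inline sector_t aic_style_div(sector_t capacity, int heads, int sectors)
    {
        sector_div(capacity, heads * sectors); /* mutates only the local copy */
        return capacity;                       /* the cylinder count          */
    }

Callers can then write cylinders = aic_style_div(capacity, heads, sectors) with no save-and-restore dance.
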
/linux-4.1.27/block/
blk-lib.c
104 sector_div(tmp, granularity) != alignment) { blkdev_issue_discard()
106 sector_div(end_sect, granularity); blkdev_issue_discard()
/linux-4.1.27/fs/nilfs2/
the_nilfs.h
341 sector_div(segnum, nilfs->ns_blocks_per_segment); nilfs_get_segnum_of_block()
/linux-4.1.27/drivers/block/
null_blk.c
579 sector_div(size, bs); null_add_dev()
sunvdc.c
118 sector_div(cylinders, geo->heads * geo->sectors); vdc_getgeo()
cciss.c
451 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); cciss_seq_show()
453 sector_div(vol_sz_frac, ENG_GIG_FACTOR); cciss_seq_show()
2875 unsigned long rem = sector_div(real_size, t); cciss_geometry_inquiry()
xen-blkfront.c
342 sector_div(cylinders, hg->heads * hg->sectors); blkif_getgeo()
/linux-4.1.27/drivers/block/paride/
pf.c
335 geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT); pf_getgeo()
339 geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT); pf_getgeo()
/linux-4.1.27/include/linux/
kernel.h
137 # define sector_div(a, b) do_div(a, b) macro
139 # define sector_div(n, b)( \ macro
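
Note: the kernel.h hits are the definition itself. Depending on the configuration, sector_div() expands to either do_div() (whose divisor must fit in a u32, the constraint the status_resync() comment at md.c:7104 refers to) or an open-coded divide; either way the contract is the same: the first argument is replaced by the quotient and the macro evaluates to the remainder. A compile-anywhere model of that contract:

    #include <assert.h>

    typedef unsigned long long sector_t;
    /* Userspace model: n /= b in place, remainder returned. In the kernel,
     * the do_div() form additionally requires the divisor to fit in a u32. */
    #define sector_div(n, b) ({ sector_t _rem = (n) % (b); (n) /= (b); _rem; })

    int main(void)
    {
        sector_t n = 1000;
        unsigned int rem = (unsigned int)sector_div(n, 33);
        assert(n == 30 && rem == 10);   /* 1000 = 33 * 30 + 10 */
        return 0;
    }
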
device-mapper.h
579 sector_div(_r, (sz)); \
blkdev.h
1280 unsigned int alignment = sector_div(sector, granularity >> 9) << 9; queue_limit_alignment_offset()
1320 offset = sector_div(sector, granularity); queue_limit_discard_alignment()
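
Note: both blkdev.h hits use the remainder rather than the quotient: the sector is divided by the granularity expressed in 512-byte sectors (granularity >> 9), and the remainder is shifted back into bytes. A minimal model of that shift dance (offset_in_granule is an illustrative name, in the spirit of queue_limit_alignment_offset()):

    typedef unsigned long long sector_t;
    #define sector_div(n, b) ({ sector_t _rem = (n) % (b); (n) /= (b); _rem; })

    /* Byte offset of 'sector' within a granule of 'granularity' bytes. */
    static unsigned int offset_in_granule(sector_t sector, unsigned int granularity)
    {
        return (unsigned int)(sector_div(sector, granularity >> 9) << 9);
    }
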
/linux-4.1.27/kernel/
acct.c
60 #include <linux/blkdev.h> /* sector_div */
/linux-4.1.27/drivers/s390/block/
dasd_eckd.c
2697 unsigned int recoffs = sector_div(trkid, blk_per_trk); rq_for_each_segment()
2854 recoffs = sector_div(trkid, blk_per_trk); rq_for_each_segment()
3166 offs = sector_div(trkid, blk_per_trk); rq_for_each_segment()
3252 first_offs = sector_div(first_trk, blk_per_trk); dasd_eckd_build_cp()
3255 last_offs = sector_div(last_trk, blk_per_trk); dasd_eckd_build_cp()
/linux-4.1.27/drivers/scsi/
mvumi.c
2206 sector_div(cylinders, tmp); mvumi_bios_param()
2213 sector_div(cylinders, tmp); mvumi_bios_param()
3w-9xxx.c
1706 cylinders = sector_div(capacity, heads * sectors); twa_scsi_biosparam()
1710 cylinders = sector_div(capacity, heads * sectors); twa_scsi_biosparam()
wd719x.c
548 geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ wd719x_biosparam()
aha1542.c
935 geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ aha1542_biosparam()
3w-xxxx.c
1341 cylinders = sector_div(capacity, heads * sectors); tw_scsi_biosparam()
1346 cylinders = sector_div(capacity, heads * sectors); tw_scsi_biosparam()
stex.c
1358 sector_div(capacity, heads * sectors); stex_biosparam()
storvsc_drv.c
1457 sector_div(cylinders, heads * sectors_pt); storvsc_get_chs()
3w-sas.c
1428 geom[2] = sector_div(capacity, heads * sectors); /* cylinders */ twl_scsi_biosparam()
dpt_i2o.c
517 cylinders = sector_div(capacity, heads * sectors); adpt_bios_param()
ipr.c
4614 sector_div(cylinders, (128 * 32)); ipr_biosparam()
/linux-4.1.27/drivers/scsi/libsas/
sas_scsi_host.c
915 sector_div(capacity, 255*63); sas_bios_param()
/linux-4.1.27/drivers/message/fusion/
mptscsih.c
2103 sector_div(cylinders,dummy); mptscsih_bios_param()
2114 sector_div(cylinders,dummy); mptscsih_bios_param()
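
Note: nearly the whole drivers/scsi cluster is the same BIOS-geometry idiom: fix heads and sectors-per-track, then divide the capacity by heads * sectors with sector_div() to get the cylinder count (mptscsih and the mpt2sas/mpt3sas helpers keep the quotient left in the mutated variable, while the 3ware-style callers assign the macro's return value). A compile-anywhere model with the conventional 255/63 fallback geometry (the capacity value is arbitrary):

    #include <stdio.h>

    typedef unsigned long long sector_t;
    #define sector_div(n, b) ({ sector_t _rem = (n) % (b); (n) /= (b); _rem; })

    int main(void)
    {
        sector_t capacity = 976773168;         /* 512-byte sectors, ~465 GiB   */
        int heads = 255, sectors = 63;

        sector_div(capacity, heads * sectors); /* capacity is now the quotient */
        printf("geometry: C/H/S = %llu/%d/%d\n", capacity, heads, sectors);
        return 0;
    }
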
/linux-4.1.27/drivers/memstick/core/
ms_block.c
1911 sector_div(lba, msb->page_size / 512); msb_io_work()
/linux-4.1.27/drivers/scsi/aacraid/
aacraid.h
2092 sector_div(capacity, divisor); cap_to_cyls()
/linux-4.1.27/drivers/scsi/mpt2sas/
mpt2sas_scsih.c
2155 sector_div(cylinders, dummy); _scsih_bios_param()
2166 sector_div(cylinders, dummy); _scsih_bios_param()
3873 column = sector_div(p_lba, num_pds); _scsih_setup_direct_io()
/linux-4.1.27/drivers/scsi/megaraid/
megaraid_sas_base.c
2561 sector_div(cylinders, tmp); megasas_bios_param()
2572 sector_div(cylinders, tmp); megasas_bios_param()
/linux-4.1.27/drivers/scsi/mpt3sas/
mpt3sas_scsih.c
1822 sector_div(cylinders, dummy); _scsih_bios_param()
1833 sector_div(cylinders, dummy); _scsih_bios_param()
/linux-4.1.27/drivers/block/mtip32xx/
mtip32xx.c
3639 sector_div(capacity, (geo->heads * geo->sectors)); mtip_block_getgeo()
/linux-4.1.27/drivers/ata/
libata-scsi.c
397 sector_div(capacity, 255*63); ata_std_bios_param()

Completed in 2601 milliseconds