
Searched refs:sector (Results 1 – 179 of 179) sorted by relevance

/linux-4.1.27/drivers/block/drbd/
Ddrbd_interval.c25 sector_t max = node->sector + (node->size >> 9); in compute_subtree_last()
50 sector_t this_end = this->sector + (this->size >> 9); in drbd_insert_interval()
61 if (this->sector < here->sector) in drbd_insert_interval()
63 else if (this->sector > here->sector) in drbd_insert_interval()
90 drbd_contains_interval(struct rb_root *root, sector_t sector, in drbd_contains_interval() argument
99 if (sector < here->sector) in drbd_contains_interval()
101 else if (sector > here->sector) in drbd_contains_interval()
134 drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size) in drbd_find_overlap() argument
138 sector_t end = sector + (size >> 9); in drbd_find_overlap()
147 sector < interval_end(node->rb_left)) { in drbd_find_overlap()
[all …]
Ddrbd_actlog.c140 sector_t sector, int rw) in _drbd_md_sync_page_io() argument
156 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io()
191 sector_t sector, int rw) in drbd_md_sync_page_io() argument
200 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", in drbd_md_sync_page_io()
203 if (sector < drbd_md_first_sector(bdev) || in drbd_md_sync_page_io()
204 sector + 7 > drbd_md_last_sector(bdev)) in drbd_md_sync_page_io()
207 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ"); in drbd_md_sync_page_io()
209 err = _drbd_md_sync_page_io(device, bdev, sector, rw); in drbd_md_sync_page_io()
212 (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err); in drbd_md_sync_page_io()
256 unsigned first = i->sector >> (AL_EXTENT_SHIFT-9); in drbd_al_begin_io_fastpath()
[all …]
Ddrbd_interval.h9 sector_t sector; /* start sector of the interval */ member
37 #define drbd_for_each_overlap(i, root, sector, size) \ argument
38 for (i = drbd_find_overlap(root, sector, size); \
40 i = drbd_next_overlap(i, sector, size))
Ddrbd_worker.c158 drbd_rs_complete_io(device, i.sector); in drbd_endio_write_sec_final()
185 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
190 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
360 sector_t sector = peer_req->i.sector; in w_e_send_csum() local
371 err = drbd_send_drequest_csum(peer_device, sector, size, in w_e_send_csum()
391 static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size) in read_for_csum() argument
401 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, in read_for_csum()
591 sector_t sector; in make_resync_request() local
650 sector = BM_BIT_TO_SECT(bit); in make_resync_request()
652 if (drbd_try_rs_begin_io(device, sector)) { in make_resync_request()
[all …]
Ddrbd_receiver.c341 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in drbd_alloc_peer_req() argument
369 peer_req->i.sector = sector; in drbd_alloc_peer_req()
1373 sector_t sector = peer_req->i.sector; in drbd_submit_peer_request() local
1391 sector, data_size >> 9, GFP_NOIO, false)) in drbd_submit_peer_request()
1418 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request()
1450 sector += len >> 9; in drbd_submit_peer_request()
1587 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in read_in_block() argument
1627 if (sector + (data_size>>9) > capacity) { in read_in_block()
1631 (unsigned long long)sector, data_size); in read_in_block()
1638 peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO); in read_in_block()
[all …]
Ddrbd_req.c34 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
67 req->i.sector = bio_src->bi_iter.bi_sector; in drbd_req_new()
134 s, (unsigned long long)req->i.sector, req->i.size); in drbd_req_destroy()
153 drbd_set_out_of_sync(device, req->i.sector, req->i.size); in drbd_req_destroy()
156 drbd_set_in_sync(device, req->i.sector, req->i.size); in drbd_req_destroy()
176 (unsigned long long) req->i.sector, req->i.size); in drbd_req_destroy()
547 (unsigned long long)req->i.sector, in drbd_report_io_error()
641 drbd_set_out_of_sync(device, req->i.sector, req->i.size); in __req_mod()
911 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size) in drbd_may_do_local_read() argument
920 esector = sector + (size >> 9) - 1; in drbd_may_do_local_read()
[all …]
Ddrbd_protocol.h121 u64 sector; /* 64 bits sector number */ member
141 u64 sector; member
148 u64 sector; member
271 u64 sector; member
Ddrbd_int.h1100 sector_t sector, int blksize, u64 block_id);
1106 sector_t sector, int size, u64 block_id);
1107 extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
1110 extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
1497 struct drbd_backing_dev *bdev, sector_t sector, int rw);
1541 extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1632 extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1633 extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1634 extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1638 sector_t sector, int size);
[all …]
Ddrbd_main.c1316 u64 sector, u32 blksize, u64 block_id) in _drbd_send_ack() argument
1328 p->sector = sector; in _drbd_send_ack()
1343 _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size), in drbd_send_ack_dp()
1350 _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id); in drbd_send_ack_rp()
1363 cpu_to_be64(peer_req->i.sector), in drbd_send_ack()
1371 sector_t sector, int blksize, u64 block_id) in drbd_send_ack_ex() argument
1374 cpu_to_be64(sector), in drbd_send_ack_ex()
1380 sector_t sector, int size, u64 block_id) in drbd_send_drequest() argument
1389 p->sector = cpu_to_be64(sector); in drbd_send_drequest()
1395 int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size, in drbd_send_drequest_csum() argument
[all …]
Ddrbd_debugfs.c103 (unsigned long long)req->i.sector, req->i.size >> 9, in seq_print_one_request()
265 (unsigned long long)peer_req->i.sector, peer_req->i.size >> 9, in seq_print_peer_request()
/linux-4.1.27/drivers/block/
Dbrd.c55 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) in brd_lookup_page() argument
72 idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */ in brd_lookup_page()
86 static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) in brd_insert_page() argument
92 page = brd_lookup_page(brd, sector); in brd_insert_page()
119 idx = sector >> PAGE_SECTORS_SHIFT; in brd_insert_page()
134 static void brd_free_page(struct brd_device *brd, sector_t sector) in brd_free_page() argument
140 idx = sector >> PAGE_SECTORS_SHIFT; in brd_free_page()
147 static void brd_zero_page(struct brd_device *brd, sector_t sector) in brd_zero_page() argument
151 page = brd_lookup_page(brd, sector); in brd_zero_page()
196 static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) in copy_to_brd_setup() argument
[all …]
Dpmem.c44 sector_t sector) in pmem_do_bvec() argument
47 size_t pmem_off = sector << 9; in pmem_do_bvec()
66 sector_t sector; in pmem_make_request() local
78 sector = bio->bi_iter.bi_sector; in pmem_make_request()
81 rw, sector); in pmem_make_request()
82 sector += bvec.bv_len >> 9; in pmem_make_request()
89 static int pmem_rw_page(struct block_device *bdev, sector_t sector, in pmem_rw_page() argument
94 pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); in pmem_rw_page()
100 static long pmem_direct_access(struct block_device *bdev, sector_t sector, in pmem_direct_access() argument
104 size_t offset = sector << 9; in pmem_direct_access()
Dswim.c36 unsigned char sector; member
461 int sector, unsigned char *buffer) in swim_read_sector() argument
478 if (!ret && (header.sector == sector)) { in swim_read_sector()
490 (header.sector != sector)) in swim_read_sector()
502 int side, track, sector; in floppy_read_sectors() local
512 sector = x % fs->secpertrack + 1; in floppy_read_sectors()
516 ret = swim_read_sector(fs, side, track, sector, in floppy_read_sectors()
Dpktcdvd.c109 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) in get_zone() argument
111 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); in get_zone()
986 bio, (unsigned long long)pkt->sector, in pkt_end_io_read()
1035 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / in pkt_gather_data()
1048 (unsigned long long)pkt->sector); in pkt_gather_data()
1063 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1083 frames_read, (unsigned long long)pkt->sector); in pkt_gather_data()
1097 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) { in pkt_get_packet_data()
1099 if (pkt->sector != zone) in pkt_get_packet_data()
1150 old_block = pkt->sector / (CD_FRAMESIZE >> 9); in pkt_start_recovery()
[all …]
Dvirtio_blk.c177 vbr->out_hdr.sector = 0; in virtio_queue_rq()
183 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req)); in virtio_queue_rq()
188 vbr->out_hdr.sector = 0; in virtio_queue_rq()
193 vbr->out_hdr.sector = 0; in virtio_queue_rq()
Damiflop.c1376 unsigned int cnt, block, track, sector; in redo_fd_request() local
1408 sector = block % (floppy->dtype->sects * floppy->type->sect_mult); in redo_fd_request()
1412 "0x%08lx\n", track, sector, data); in redo_fd_request()
1421 memcpy(data, floppy->trackbuf + sector * 512, 512); in redo_fd_request()
1423 memcpy(floppy->trackbuf + sector * 512, data, 512); in redo_fd_request()
Dswim3.c79 REG(sector); /* sector # to read or write */
474 out_8(&sw->sector, fs->req_sector); in setup_transfer()
/linux-4.1.27/include/trace/events/
Dblock.h22 __field( sector_t, sector )
28 __entry->sector = bh->b_blocknr;
34 (unsigned long long)__entry->sector, __entry->size
72 __field( sector_t, sector )
81 __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
94 (unsigned long long)__entry->sector,
152 __field( sector_t, sector )
161 __entry->sector = blk_rq_pos(rq);
172 (unsigned long long)__entry->sector,
184 __field( sector_t, sector )
[all …]
Dbcache.h17 __field(sector_t, sector )
27 __entry->sector = bio->bi_iter.bi_sector;
35 __entry->rwbs, (unsigned long long)__entry->sector,
95 __field(sector_t, sector )
102 __entry->sector = bio->bi_iter.bi_sector;
109 (unsigned long long)__entry->sector, __entry->nr_sector)
128 __field(sector_t, sector )
137 __entry->sector = bio->bi_iter.bi_sector;
146 __entry->rwbs, (unsigned long long)__entry->sector,
158 __field(sector_t, sector )
[all …]
Df2fs.h749 __field(sector_t, sector)
757 __entry->sector = bio->bi_iter.bi_sector;
765 (unsigned long long)__entry->sector,
/linux-4.1.27/block/
Dblk-lib.c40 int blkdev_issue_discard(struct block_device *bdev, sector_t sector, in blkdev_issue_discard() argument
101 end_sect = sector + req_sects; in blkdev_issue_discard()
108 req_sects = end_sect - sector; in blkdev_issue_discard()
111 bio->bi_iter.bi_sector = sector; in blkdev_issue_discard()
118 sector = end_sect; in blkdev_issue_discard()
155 int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, in blkdev_issue_write_same() argument
185 bio->bi_iter.bi_sector = sector; in blkdev_issue_write_same()
197 sector += max_write_same_sectors; in blkdev_issue_write_same()
229 static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, in __blkdev_issue_zeroout() argument
251 bio->bi_iter.bi_sector = sector; in __blkdev_issue_zeroout()
[all …]
Ddeadline-iosched.c135 sector_t sector = bio_end_sector(bio); in deadline_merge() local
137 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); in deadline_merge()
139 BUG_ON(sector != blk_rq_pos(__rq)); in deadline_merge()
Dgenhd.c194 static inline int sector_in_part(struct hd_struct *part, sector_t sector) in sector_in_part() argument
196 return part->start_sect <= sector && in sector_in_part()
197 sector < part->start_sect + part_nr_sects_read(part); in sector_in_part()
215 struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) in disk_map_sector_rcu() argument
224 if (part && sector_in_part(part, sector)) in disk_map_sector_rcu()
230 if (part && sector_in_part(part, sector)) { in disk_map_sector_rcu()
Delevator.c326 struct request *elv_rb_find(struct rb_root *root, sector_t sector) in elv_rb_find() argument
334 if (sector < blk_rq_pos(rq)) in elv_rb_find()
336 else if (sector > blk_rq_pos(rq)) in elv_rb_find()
Dcfq-iosched.c2102 sector_t sector, struct rb_node **ret_parent, in cfq_prio_tree_lookup() argument
2120 if (sector > blk_rq_pos(cfqq->next_rq)) in cfq_prio_tree_lookup()
2122 else if (sector < blk_rq_pos(cfqq->next_rq)) in cfq_prio_tree_lookup()
2592 sector_t sector = cfqd->last_position; in cfqq_close() local
2601 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); in cfqq_close()
2613 if (blk_rq_pos(__cfqq->next_rq) < sector) in cfqq_close()
Dblk-core.c1773 sector_t sector = bio->bi_iter.bi_sector; in bio_check_eod() local
1775 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { in bio_check_eod()
/linux-4.1.27/drivers/scsi/
Dsr_vendor.c161 unsigned long sector; in sr_cd_check() local
173 sector = 0; /* the multisession sector offset goes here */ in sr_cd_check()
199 sector = buffer[11] + (buffer[10] << 8) + in sr_cd_check()
203 sector = 0; in sr_cd_check()
231 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; in sr_cd_check()
259 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; in sr_cd_check()
260 if (sector) in sr_cd_check()
261 sector -= CD_MSF_OFFSET; in sr_cd_check()
297 sector = buffer[11] + (buffer[10] << 8) + in sr_cd_check()
307 sector = 0; in sr_cd_check()
[all …]
Dscsi_debug.c677 static struct sd_dif_tuple *dif_store(sector_t sector) in dif_store() argument
679 sector = do_div(sector, sdebug_store_sectors); in dif_store()
681 return dif_storep + sector; in dif_store()
2448 sector_t sector, u32 ei_lba) in dif_verify() argument
2455 (unsigned long)sector, in dif_verify()
2461 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { in dif_verify()
2463 __func__, (unsigned long)sector); in dif_verify()
2469 __func__, (unsigned long)sector); in dif_verify()
2475 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector, in dif_copy_prot() argument
2492 void *start = dif_store(sector); in dif_copy_prot()
[all …]
Dscsicam.c142 cyl = p->cyl + ((p->sector & 0xc0) << 2); in scsi_partsize()
Dsd.c695 sector_t sector = blk_rq_pos(rq); in sd_setup_discard_cmnd() local
703 sector >>= ilog2(sdp->sector_size) - 9; in sd_setup_discard_cmnd()
720 put_unaligned_be64(sector, &buf[8]); in sd_setup_discard_cmnd()
730 put_unaligned_be64(sector, &cmd->cmnd[2]); in sd_setup_discard_cmnd()
742 put_unaligned_be32(sector, &cmd->cmnd[2]); in sd_setup_discard_cmnd()
821 sector_t sector = blk_rq_pos(rq); in sd_setup_write_same_cmnd() local
831 sector >>= ilog2(sdp->sector_size) - 9; in sd_setup_write_same_cmnd()
836 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { in sd_setup_write_same_cmnd()
839 put_unaligned_be64(sector, &cmd->cmnd[2]); in sd_setup_write_same_cmnd()
844 put_unaligned_be32(sector, &cmd->cmnd[2]); in sd_setup_write_same_cmnd()
Dosst.c1378 int sector; in osst_get_sector() local
1392 sector = (STp->frame_in_buffer ? STp->first_frame_position-1 : in osst_get_sector()
1395 sector |= (STp->buffer->buffer_bytes >> OSST_SECTOR_SHIFT) & OSST_SECTOR_MASK; in osst_get_sector()
1397 sector |= (STp->buffer->read_pointer >> OSST_SECTOR_SHIFT) & OSST_SECTOR_MASK; in osst_get_sector()
1399 sector = osst_get_frame_position(STp, aSRpnt); in osst_get_sector()
1400 if (sector > 0) in osst_get_sector()
1401 sector <<= OSST_FRAME_SHIFT; in osst_get_sector()
1403 return sector; in osst_get_sector()
1406 static int osst_seek_sector(struct osst_tape * STp, struct osst_request ** aSRpnt, int sector) in osst_seek_sector() argument
1409 int frame = sector >> OSST_FRAME_SHIFT, in osst_seek_sector()
[all …]
Dlibiscsi.c840 sector_t sector; in iscsi_scsi_cmd_rsp() local
850 ascq = session->tt->check_protection(task, &sector); in iscsi_scsi_cmd_rsp()
861 put_unaligned_be64(sector, &sc->sense_buffer[12]); in iscsi_scsi_cmd_rsp()
/linux-4.1.27/drivers/md/
Draid0.c292 sector_t sector = *sectorp; in find_zone() local
295 if (sector < z[i].zone_end) { in find_zone()
297 *sectorp = sector - z[i-1].zone_end; in find_zone()
308 sector_t sector, sector_t *sector_offset) in map_sector() argument
319 sect_in_chunk = sector & (chunk_sects - 1); in map_sector()
320 sector >>= chunksect_bits; in map_sector()
326 sect_in_chunk = sector_div(sector, chunk_sects); in map_sector()
337 + sector_div(sector, zone->nb_dev)]; in map_sector()
353 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in raid0_mergeable_bvec() local
354 sector_t sector_offset = sector; in raid0_mergeable_bvec()
[all …]
Ddm-log-writes.c92 __le64 sector; member
119 sector_t sector; member
191 sector_t sector) in write_metadata() argument
204 bio->bi_iter.bi_sector = sector; in write_metadata()
241 struct pending_block *block, sector_t sector) in log_one_block() argument
248 entry.sector = cpu_to_le64(block->sector); in log_one_block()
253 block->datalen, sector)) { in log_one_block()
260 sector++; in log_one_block()
269 bio->bi_iter.bi_sector = sector; in log_one_block()
291 bio->bi_iter.bi_sector = sector; in log_one_block()
[all …]
Draid1.c293 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
358 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
377 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, in close_write()
452 r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
513 const sector_t this_sector = r1_bio->sector; in read_balance()
716 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in raid1_mergeable_bvec() local
729 bvm->bi_sector = sector + in raid1_mergeable_bvec()
891 sector_t sector = 0; in wait_barrier() local
929 sector = conf->start_next_window; in wait_barrier()
935 return sector; in wait_barrier()
[all …]
Draid5.c138 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) in r5_next_bio() argument
141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio()
449 (unsigned long long)sh->sector); in remove_hash()
456 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
459 (unsigned long long)sh->sector); in insert_hash()
521 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
532 (unsigned long long)sector); in init_stripe()
537 sh->sector = sector; in init_stripe()
538 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
547 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
[all …]
Draid10.c410 (unsigned long long)r10_bio->sector); in raid10_end_read_request()
419 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
555 sector_t sector; in __raid10_find_phys() local
569 chunk = r10bio->sector >> geo->chunk_shift; in __raid10_find_phys()
570 sector = r10bio->sector & geo->chunk_mask; in __raid10_find_phys()
578 sector += stripe << geo->chunk_shift; in __raid10_find_phys()
584 sector_t s = sector; in __raid10_find_phys()
610 sector += (geo->chunk_mask + 1); in __raid10_find_phys()
620 ((r10bio->sector >= conf->reshape_progress) != in raid10_find_phys()
630 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) in raid10_find_virt() argument
[all …]
Ddm-crypt.c62 sector_t sector; member
682 u64 sector = cpu_to_le64((u64)dmreq->iv_sector); in crypt_iv_tcw_whitening() local
689 crypto_xor(buf, (u8 *)&sector, 8); in crypt_iv_tcw_whitening()
690 crypto_xor(&buf[8], (u8 *)&sector, 8); in crypt_iv_tcw_whitening()
721 u64 sector = cpu_to_le64((u64)dmreq->iv_sector); in crypt_iv_tcw_gen() local
734 crypto_xor(iv, (u8 *)&sector, 8); in crypt_iv_tcw_gen()
736 crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8); in crypt_iv_tcw_gen()
805 sector_t sector) in crypt_convert_init() argument
813 ctx->cc_sector = sector + cc->iv_offset; in crypt_convert_init()
1036 struct bio *bio, sector_t sector) in crypt_io_init() argument
[all …]
Ddm-stripe.c211 static void stripe_map_sector(struct stripe_c *sc, sector_t sector, in stripe_map_sector() argument
214 sector_t chunk = dm_target_offset(sc->ti, sector); in stripe_map_sector()
239 static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, in stripe_map_range_sector() argument
244 stripe_map_sector(sc, sector, &stripe, result); in stripe_map_range_sector()
249 sector = *result; in stripe_map_range_sector()
251 *result -= sector_div(sector, sc->chunk_size); in stripe_map_range_sector()
253 *result = sector & ~(sector_t)(sc->chunk_size - 1); in stripe_map_range_sector()
Dlinear.c30 static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) in which_dev() argument
46 if (sector < conf->disks[mid].end_sector) in which_dev()
69 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in linear_mergeable_bvec() local
73 dev0 = which_dev(mddev, sector); in linear_mergeable_bvec()
74 maxsectors = dev0->end_sector - sector; in linear_mergeable_bvec()
Ddm-exception-store.h197 sector_t sector) in sector_to_chunk() argument
199 return sector >> store->chunk_shift; in sector_to_chunk()
Ddm.c1345 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) in max_io_len_target_boundary() argument
1347 sector_t target_offset = dm_target_offset(ti, sector); in max_io_len_target_boundary()
1352 static sector_t max_io_len(sector_t sector, struct dm_target *ti) in max_io_len() argument
1354 sector_t len = max_io_len_target_boundary(sector, ti); in max_io_len()
1361 offset = dm_target_offset(ti, sector); in max_io_len()
1433 sector_t sector; in __map_bio() local
1446 sector = clone->bi_iter.bi_sector; in __map_bio()
1452 tio->io->bio->bi_bdev->bd_dev, sector); in __map_bio()
1471 sector_t sector; member
1475 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) in bio_setup_sector() argument
[all …]
Ddm-snap.c929 sector_t sector, unsigned chunk_size);
998 dest.sector = chunk_to_sector(s->store, old_chunk); in snapshot_merge_next_chunks()
999 dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector); in snapshot_merge_next_chunks()
1002 src.sector = chunk_to_sector(s->store, new_chunk); in snapshot_merge_next_chunks()
1015 while (origin_write_extent(s, dest.sector, io_size)) { in snapshot_merge_next_chunks()
1563 src.sector = chunk_to_sector(s->store, pe->e.old_chunk); in start_copy()
1564 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); in start_copy()
1567 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); in start_copy()
2047 static int __origin_write(struct list_head *snapshots, sector_t sector, in __origin_write() argument
2074 if (sector >= dm_table_get_size(snap->ti->table)) in __origin_write()
[all …]
Draid5.h204 sector_t sector; /* sector of this row */ member
245 sector_t sector; /* sector of this page */ member
Draid1.h125 sector_t sector; member
Draid10.h96 sector_t sector; /* virtual sector number */ member
Dbitmap.c1563 sector_t sector = 0; in bitmap_close_sync() local
1567 while (sector < bitmap->mddev->resync_max_sectors) { in bitmap_close_sync()
1568 bitmap_end_sync(bitmap, sector, &blocks, 0); in bitmap_close_sync()
1569 sector += blocks; in bitmap_close_sync()
1574 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector) in bitmap_cond_end_sync() argument
1581 if (sector == 0) { in bitmap_cond_end_sync()
1591 bitmap->mddev->curr_resync_completed = sector; in bitmap_cond_end_sync()
1593 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); in bitmap_cond_end_sync()
1595 while (s < sector && s < bitmap->mddev->resync_max_sectors) { in bitmap_cond_end_sync()
1817 sector_t sector = 0; in bitmap_load() local
[all …]
Dmd.h126 sector_t sector; member
643 sector_t sector, int size, struct page *page);
645 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
Dbitmap.h258 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
Ddm-raid1.c269 io[i].sector = 0; in mirror_flush()
339 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
358 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); in recover()
407 static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) in choose_mirror() argument
460 io->sector = map_sector(m, bio); in map_region()
Ddm.h61 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
Ddm-region-hash.c116 static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) in dm_rh_sector_to_region() argument
118 return sector >> rh->region_shift; in dm_rh_sector_to_region()
DKconfig133 of a single drive. For a given sector (row) number, (N - 1) drives
141 against a failure of any two drives. For a given sector
356 of a single drive. For a given sector (row) number, (N - 1) drives
364 against a failure of any two drives. For a given sector
Ddm-log.c305 .sector = 0, in flush_header()
438 lc->header_location.sector = 0; in create_log_context()
Ddm-kcopyd.c657 sub_job->source.sector += progress; in segment_complete()
661 sub_job->dests[i].sector += progress; in segment_complete()
Dmd.c751 sector_t sector, int size, struct page *page) in md_super_write() argument
762 bio->bi_iter.bi_sector = sector; in md_super_write()
777 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, in sync_page_io() argument
786 bio->bi_iter.bi_sector = sector + rdev->sb_start; in sync_page_io()
789 (sector >= rdev->mddev->reshape_position))) in sync_page_io()
790 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; in sync_page_io()
792 bio->bi_iter.bi_sector = sector + rdev->data_offset; in sync_page_io()
1502 u64 sector = bb >> 10; in super_1_load() local
1503 sector <<= sb->bblog_shift; in super_1_load()
1508 sector, count, 1) == 0) in super_1_load()
[all …]
Ddm-io.c321 bio->bi_iter.bi_sector = where->sector + (where->count - remaining); in do_region()
Ddm-table.c1181 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) in dm_table_find_target() argument
1191 if (node[k] >= sector) in dm_table_find_target()
Ddm-bufio.c563 .sector = block << b->c->sectors_per_block_bits, in use_dmio()
1306 .sector = 0, in dm_bufio_issue_flush()
Ddm-snap-persistent.c233 .sector = ps->store->chunk_size * chunk, in chunk_io()
Ddm-cache-target.c1049 c_region.sector = cblock * cache->sectors_per_block; in issue_copy()
1054 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block; in issue_copy()
1058 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block; in issue_copy()
Ddm-thin.c972 to.sector = begin; in ll_zero()
1036 from.sector = data_origin * pool->sectors_per_block; in schedule_copy()
1040 to.sector = data_dest * pool->sectors_per_block; in schedule_copy()
Ddm-ioctl.c1558 ti = dm_table_find_target(table, tmsg->sector); in target_message()
/linux-4.1.27/drivers/usb/storage/
Djumpshot.c164 u32 sector, in jumpshot_read_data() argument
180 if (sector > 0x0FFFFFFF) in jumpshot_read_data()
202 command[2] = sector & 0xFF; in jumpshot_read_data()
203 command[3] = (sector >> 8) & 0xFF; in jumpshot_read_data()
204 command[4] = (sector >> 16) & 0xFF; in jumpshot_read_data()
206 command[5] = 0xE0 | ((sector >> 24) & 0x0F); in jumpshot_read_data()
226 sector += thistime; in jumpshot_read_data()
241 u32 sector, in jumpshot_write_data() argument
257 if (sector > 0x0FFFFFFF) in jumpshot_write_data()
284 command[2] = sector & 0xFF; in jumpshot_write_data()
[all …]
Ddatafab.c145 u32 sector, in datafab_read_data() argument
190 command[2] = sector & 0xFF; in datafab_read_data()
191 command[3] = (sector >> 8) & 0xFF; in datafab_read_data()
192 command[4] = (sector >> 16) & 0xFF; in datafab_read_data()
195 command[5] |= (sector >> 24) & 0x0F; in datafab_read_data()
213 sector += thistime; in datafab_read_data()
228 u32 sector, in datafab_write_data() argument
278 command[2] = sector & 0xFF; in datafab_write_data()
279 command[3] = (sector >> 8) & 0xFF; in datafab_write_data()
280 command[4] = (sector >> 16) & 0xFF; in datafab_write_data()
[all …]
Dshuttle_usbat.c208 u32 sector, unsigned char cmd) in usbat_pack_ata_sector_cmd() argument
212 buf[2] = sector & 0xFF; in usbat_pack_ata_sector_cmd()
213 buf[3] = (sector >> 8) & 0xFF; in usbat_pack_ata_sector_cmd()
214 buf[4] = (sector >> 16) & 0xFF; in usbat_pack_ata_sector_cmd()
215 buf[5] = 0xE0 | ((sector >> 24) & 0x0F); in usbat_pack_ata_sector_cmd()
1116 u32 sector, in usbat_flash_read_data() argument
1147 if (sector > 0x0FFFFFFF) in usbat_flash_read_data()
1172 usbat_pack_ata_sector_cmd(command, thistime, sector, 0x20); in usbat_flash_read_data()
1190 sector += thistime; in usbat_flash_read_data()
1207 u32 sector, in usbat_flash_write_data() argument
[all …]
Dtransport.c526 u32 sector; in last_sector_hacks() local
549 sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) | in last_sector_hacks()
557 if (sector + 1 != sdkp->capacity) in last_sector_hacks()
/linux-4.1.27/fs/fat/
Dcache.c304 int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, in fat_bmap() argument
317 if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) { in fat_bmap()
318 *phys = sector + sbi->dir_start; in fat_bmap()
325 if (sector >= last_block) { in fat_bmap()
335 if (sector >= last_block) in fat_bmap()
339 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); in fat_bmap()
340 offset = sector & (sbi->sec_per_clus - 1); in fat_bmap()
347 if (*mapped_blocks > last_block - sector) in fat_bmap()
348 *mapped_blocks = last_block - sector; in fat_bmap()
Dfat.h288 extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
/linux-4.1.27/drivers/mtd/nand/
Datmel_nand_ecc.h120 #define pmecc_readb_ecc_relaxed(addr, sector, n) \ argument
121 readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
123 #define pmecc_readl_rem_relaxed(addr, sector, n) \ argument
124 readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
Dsh_flctl.c446 (struct sh_flctl *flctl, uint8_t *buff, int sector) in read_ecfiforeg() argument
452 res = wait_recfifo_ready(flctl , sector); in read_ecfiforeg()
582 int sector, page_sectors; in execmd_read_page_sector() local
598 for (sector = 0; sector < page_sectors; sector++) { in execmd_read_page_sector()
599 read_fiforeg(flctl, 512, 512 * sector); in execmd_read_page_sector()
602 &flctl->done_buff[mtd->writesize + 16 * sector], in execmd_read_page_sector()
603 sector); in execmd_read_page_sector()
653 int sector, page_sectors; in execmd_write_page_sector() local
666 for (sector = 0; sector < page_sectors; sector++) { in execmd_write_page_sector()
667 write_fiforeg(flctl, 512, 512 * sector); in execmd_write_page_sector()
[all …]
Dfsl_ifc_nand.c335 int sector = bufnum * chip->ecc.steps; in fsl_ifc_run_command() local
336 int sector_end = sector + chip->ecc.steps - 1; in fsl_ifc_run_command()
338 for (i = sector / 4; i <= sector_end / 4; i++) in fsl_ifc_run_command()
341 for (i = sector; i <= sector_end; i++) { in fsl_ifc_run_command()
Datmel_nand.c534 static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector) in pmecc_gen_syndrome() argument
543 value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2); in pmecc_gen_syndrome()
/linux-4.1.27/drivers/char/
Dps3flash.c110 u64 size, sector, offset; in ps3flash_read() local
130 sector = *pos / dev->bounce_size * priv->chunk_sectors; in ps3flash_read()
140 res = ps3flash_fetch(dev, sector); in ps3flash_read()
163 sector += priv->chunk_sectors; in ps3flash_read()
179 u64 size, sector, offset; in ps3flash_write() local
199 sector = *pos / dev->bounce_size * priv->chunk_sectors; in ps3flash_write()
210 res = ps3flash_fetch(dev, sector); in ps3flash_write()
211 else if (sector != priv->tag) in ps3flash_write()
231 priv->tag = sector; in ps3flash_write()
238 sector += priv->chunk_sectors; in ps3flash_write()
/linux-4.1.27/drivers/mtd/
Drfd_ftl.c90 static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
241 static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf) in rfd_ftl_readsect() argument
248 if (sector >= part->sector_count) in rfd_ftl_readsect()
251 addr = part->sector_map[sector]; in rfd_ftl_readsect()
639 static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr) in do_writesect() argument
680 part->sector_map[sector] = addr; in do_writesect()
682 entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector); in do_writesect()
705 static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf) in rfd_ftl_writesect() argument
712 pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector); in rfd_ftl_writesect()
719 if (sector >= part->sector_count) { in rfd_ftl_writesect()
[all …]
Dftl.c788 u_long sector, u_long nblocks) in ftl_read() argument
796 part, sector, nblocks); in ftl_read()
804 if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) { in ftl_read()
808 log_addr = part->VirtualBlockMap[sector+i]; in ftl_read()
902 u_long sector, u_long nblocks) in ftl_write() argument
910 part, sector, nblocks); in ftl_write()
924 virt_addr = sector * SECTOR_SIZE | BLOCK_DATA; in ftl_write()
961 old_addr = part->VirtualBlockMap[sector+i]; in ftl_write()
963 part->VirtualBlockMap[sector+i] = 0xffffffff; in ftl_write()
972 part->VirtualBlockMap[sector+i] = log_addr; in ftl_write()
[all …]
/linux-4.1.27/drivers/target/
Dtarget_core_sbc.c1191 sector_t sector = cmd->t_task_lba; in sbc_dif_generate() local
1212 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff); in sbc_dif_generate()
1218 "WRITE" : "READ", (unsigned long long)sector, in sbc_dif_generate()
1222 sector++; in sbc_dif_generate()
1233 const void *p, sector_t sector, unsigned int ei_lba) in sbc_dif_v1_verify() argument
1246 " csum 0x%04x\n", (unsigned long long)sector, in sbc_dif_v1_verify()
1256 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { in sbc_dif_v1_verify()
1258 " sector MSB: 0x%08x\n", (unsigned long long)sector, in sbc_dif_v1_verify()
1259 be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff)); in sbc_dif_v1_verify()
1266 " ei_lba: 0x%08x\n", (unsigned long long)sector, in sbc_dif_v1_verify()
[all …]
/linux-4.1.27/drivers/mtd/devices/
Ddocg3.c410 static void doc_setup_addr_sector(struct docg3 *docg3, int sector) in doc_setup_addr_sector() argument
413 doc_flash_address(docg3, sector & 0xff); in doc_setup_addr_sector()
414 doc_flash_address(docg3, (sector >> 8) & 0xff); in doc_setup_addr_sector()
415 doc_flash_address(docg3, (sector >> 16) & 0xff); in doc_setup_addr_sector()
425 static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs) in doc_setup_writeaddr_sector() argument
430 doc_flash_address(docg3, sector & 0xff); in doc_setup_writeaddr_sector()
431 doc_flash_address(docg3, (sector >> 8) & 0xff); in doc_setup_writeaddr_sector()
432 doc_flash_address(docg3, (sector >> 16) & 0xff); in doc_setup_writeaddr_sector()
451 int sector, ret = 0; in doc_read_seek() local
473 sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK); in doc_read_seek()
[all …]
/linux-4.1.27/kernel/power/
Dblock_io.c28 static int submit(int rw, struct block_device *bdev, sector_t sector, in submit() argument
35 bio->bi_iter.bi_sector = sector; in submit()
41 (unsigned long long)sector); in submit()
/linux-4.1.27/Documentation/devicetree/bindings/mtd/
Datmel-nand.txt28 - atmel,pmecc-sector-size : sector size for ECC computation. Supported values
31 for different sector size. First one is for sector size 512, the next is for
32 sector size 1024. If not specified, driver will build the table in runtime.
82 atmel,pmecc-sector-size = <512>;
Dmtd-physmap.txt29 - use-advanced-sector-protection: boolean to enable support for the
30 advanced sector protection (Spansion: PPB - Persistent Protection
/linux-4.1.27/Documentation/device-mapper/
Ddm-crypt.txt33 then sectors are encrypted according to their offsets (sector 0 uses key0;
34 sector 1 uses key1 etc.). <keycount> must be a power of two.
37 The IV offset is a sector count that is added to the sector number
46 Starting sector within the device where the encrypted data begins.
Dkcopyd.txt17 block-device along with the starting sector and size of the region. The source
23 sector_t sector;
Dlog-writes.txt65 <#logged entries> <highest allocated sector>
68 highest allocated sector : Highest allocated sector
Ddm-io.txt10 sector and size of the region.
14 sector_t sector;
Dlinear.txt11 <offset>: Starting sector within the device.
Dstriped.txt16 <offset>: Starting sector within the device.
Ddm-flakey.txt24 <offset>: Starting sector within the device.
Dswitch.txt89 of 512-byte sectors). This number is added to the sector number when
Dstatistics.txt9 Each user-defined region specifies a starting sector, length and step.
Dverity.txt139 Directly following the header (and with sector number padded to the next hash
Dthin-provisioning.txt385 <nr mapped sectors> <highest mapped sector>
/linux-4.1.27/fs/hfsplus/
Dwrapper.c46 int hfsplus_submit_bio(struct super_block *sb, sector_t sector, in hfsplus_submit_bio() argument
61 start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT; in hfsplus_submit_bio()
63 sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1); in hfsplus_submit_bio()
66 bio->bi_iter.bi_sector = sector; in hfsplus_submit_bio()
Dextents.c228 sector_t sector; in hfsplus_get_block() local
275 sector = ((sector_t)dblock << sbi->fs_shift) + in hfsplus_get_block()
277 map_bh(bh_result, sb, sector); in hfsplus_get_block()
Dhfsplus_fs.h527 int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf,
/linux-4.1.27/fs/
Ddax.c33 sector_t sector = block << (inode->i_blkbits - 9); in dax_clear_blocks() local
41 count = bdev_direct_access(bdev, sector, &addr, &pfn, size); in dax_clear_blocks()
57 sector += pgsz / 512; in dax_clear_blocks()
69 sector_t sector = bh->b_blocknr << (blkbits - 9); in dax_get_addr() local
70 return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size); in dax_get_addr()
274 sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); in dax_insert_mapping() local
296 error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size); in dax_insert_mapping()
Dblock_dev.c375 int bdev_read_page(struct block_device *bdev, sector_t sector, in bdev_read_page() argument
381 return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); in bdev_read_page()
404 int bdev_write_page(struct block_device *bdev, sector_t sector, in bdev_write_page() argument
413 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); in bdev_write_page()
439 long bdev_direct_access(struct block_device *bdev, sector_t sector, in bdev_direct_access() argument
449 if ((sector + DIV_ROUND_UP(size, 512)) > in bdev_direct_access()
452 sector += get_start_sect(bdev); in bdev_direct_access()
453 if (sector % (PAGE_SIZE / 512)) in bdev_direct_access()
455 avail = ops->direct_access(bdev, sector, addr, pfn, size); in bdev_direct_access()
Ddirect-io.c649 sector_t sector; in dio_new_bio() local
655 sector = start_sector << (sdio->blkbits - 9); in dio_new_bio()
658 dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages); in dio_new_bio()
/linux-4.1.27/arch/xtensa/platforms/iss/
Dsimdisk.c72 static void simdisk_transfer(struct simdisk *dev, unsigned long sector, in simdisk_transfer() argument
75 unsigned long offset = sector << SECTOR_SHIFT; in simdisk_transfer()
108 sector_t sector = bio->bi_iter.bi_sector; in simdisk_xfer_bio() local
114 simdisk_transfer(dev, sector, len, buffer, in simdisk_xfer_bio()
116 sector += len; in simdisk_xfer_bio()
/linux-4.1.27/Documentation/block/
Ddata-integrity.txt16 checksum for each sector as well as an incrementing counter that
42 information to each sector. The data + integrity metadata is stored
93 and the block layer make things like hardware sector size and
113 Some storage devices allow each hardware sector to be tagged with a
201 sector must be set, and the bio should have all data pages
232 the sector numbers will be remapped as the request traverses the
262 .tag_size = <tag bytes per hw sector>,
276 metadata per sector. I.e. 8 for DIF and EPP.
279 are available per hardware sector. For DIF this is either 2 or
Drequest.txt53 sector_t sector DBI Target location
55 unsigned long hard_nr_sectors B Used to keep sector sane
Ddeadline-iosched.txt37 write) which are serviced in increasing sector order. To limit extra seeking,
70 rbtree front sector lookup when the io scheduler merge function is called.
Dbiodoc.txt160 Lowest possible sector size that the hardware can operate
310 request->buffer, request->sector and request->nr_sectors or
521 sector_t sector; /* this field is now of type sector_t instead of int
538 /* Various sector counts */
559 The behaviour of the various sector counts are almost the same as before,
1003 AS and deadline use a hash table indexed by the last sector of a request. This
1069 5.2 64 bit sector numbers (sector_t prepares for 64 bit support)
1071 The sector number used in the bio structure has been changed to sector_t,
1072 which could be defined as 64 bit in preparation for 64 bit sector support.
1080 provides drivers with a sector number relative to whole device, rather than
[all …]
Dqueue-sysfs.txt39 This is the hardware sector size of the device, in bytes.
/linux-4.1.27/include/linux/
Dnvme.h144 static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) in nvme_block_nr() argument
146 return (sector >> (ns->lba_shift - 9)); in nvme_block_nr()
Ddm-io.h19 sector_t sector; member
Dblkdev.h1161 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1163 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1165 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1277 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) in queue_limit_alignment_offset() argument
1280 unsigned int alignment = sector_div(sector, granularity >> 9) << 9; in queue_limit_alignment_offset()
1306 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) in queue_limit_discard_alignment() argument
1320 offset = sector_div(sector, granularity); in queue_limit_discard_alignment()
Dpktcdvd.h120 sector_t sector; /* First sector in this packet */ member
Data.h988 unsigned buf_size, u64 sector, unsigned long count) in ata_set_lba_range_entries() argument
994 u64 entry = sector | in ata_set_lba_range_entries()
1000 sector += 0xffff; in ata_set_lba_range_entries()
Ddevice-mapper.h596 #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) argument
Dgenhd.h71 unsigned char sector; /* starting sector */ member
292 sector_t sector);
/linux-4.1.27/drivers/s390/block/
Ddasd_eckd.h114 __u32 sector; member
126 __u32 sector; member
183 __u8 sector; member
205 __u8 sector; member
Ddasd_eckd.c350 int sector; in fill_LRE_data() local
356 sector = 0; in fill_LRE_data()
362 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; in fill_LRE_data()
366 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; in fill_LRE_data()
370 data->sector = sector; in fill_LRE_data()
424 data->sector = 0xFF; in fill_LRE_data()
453 data->sector = 0xFF; in fill_LRE_data()
635 int sector; in locate_record() local
650 sector = 0; in locate_record()
656 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; in locate_record()
[all …]
/linux-4.1.27/Documentation/mmc/
Dmmc-dev-attrs.txt30 rel_sectors Reliable write sector count
38 always one 512 byte sector. For SD, "erase_size" is 512
58 (especially for SD where it is just one sector),
/linux-4.1.27/kernel/trace/
Dblktrace.c172 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, in act_log_check() argument
177 if (sector && (sector < bt->start_lba || sector > bt->end_lba)) in act_log_check()
201 static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, in __blk_add_trace() argument
226 if (act_log_check(bt, what, sector, pid)) in __blk_add_trace()
270 t->sector = sector; in __blk_add_trace()
1120 return te_blk_io_trace(ent)->sector; in t_sector()
1388 const int offset = offsetof(struct blk_io_trace, sector); in blk_trace_synthesize_old_trace()
1395 trace_seq_putmem(s, &t->sector, in blk_trace_synthesize_old_trace()
/linux-4.1.27/arch/cris/boot/rescue/
Dhead_v10.S70 ;; The partitiontable is looked for at the first sector after the boot
71 ;; sector. Sector size is 65536 bytes in all flashes we use.
134 ;; rescue code without erasing/reflashing the sector.
137 ;; without erasing the sector, it is possible to add new
/linux-4.1.27/include/uapi/linux/
Dvirtio_blk.h133 __virtio64 sector; member
Ddm-ioctl.h211 __u64 sector; /* Device sector */ member
Dblktrace_api.h102 __u64 sector; /* disk offset */ member
Dhdreg.h41 unsigned sector : 1; member
/linux-4.1.27/arch/powerpc/boot/dts/
Dmpc5121ads.dts45 reg = <0x00000000 0x00040000>; // first sector is protected
58 reg = <0x03ec0000 0x00040000>; // one sector for device tree
/linux-4.1.27/arch/powerpc/sysdev/
Daxonram.c143 axon_ram_direct_access(struct block_device *device, sector_t sector, in axon_ram_direct_access() argument
147 loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT; in axon_ram_direct_access()
/linux-4.1.27/Documentation/filesystems/
Dudf.txt11 dvd+rw drives and media support true random sector writes, and so a udf
16 and read-modify-write cycles to allow the filesystem random sector writes
Disofs.txt41 sbsector=xxx Session begins from sector xxx
Ddax.txt29 block device operation. It is used to translate the sector number
Dntfs.txt183 the boot sector from the backup copy.
302 the "Start sector of device" when creating the table. Once again ldminfo would
311 # Offset into Size of this Raid type Device Start sector
Dbtrfs.txt137 is limited by the root sector size, with some space unavailable due
/linux-4.1.27/arch/arm/boot/dts/
Dat91sam9x5cm.dtsi56 atmel,pmecc-sector-size = <512>;
Dat91-cosino.dtsi99 atmel,pmecc-sector-size = <512>;
Dsama5d3xcm.dtsi83 atmel,pmecc-sector-size = <512>;
Dat91sam9n12ek.dts151 atmel,pmecc-sector-size = <512>;
Dat91-sama5d3_xplained.dts231 atmel,pmecc-sector-size = <512>;
/linux-4.1.27/arch/um/drivers/
Dubd_kern.c1157 __u64 sector = io_offset >> 9; in cowify_bitmap() local
1163 if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) in cowify_bitmap()
1167 ubd_set_bit(sector + i, (unsigned char *) bitmap); in cowify_bitmap()
1173 *cow_offset = sector / (sizeof(unsigned long) * 8); in cowify_bitmap()
1194 __u64 sector = req->offset >> 9; in cowify_req() local
1202 if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) in cowify_req()
/linux-4.1.27/Documentation/x86/
Dboot.txt78 | Kernel boot sector | The kernel legacy boot sector.
82 | Boot loader | <- Boot sector entry point 0000:7C00
93 0x100000 ("high memory"), and the kernel real-mode block (boot sector,
129 | Kernel boot sector | The kernel legacy boot sector.
131 | Boot loader | <- Boot sector entry point 0000:7C00
147 sector" refers to 512 bytes. It is independent of the actual sector
151 real-mode code (boot sector and setup code) and then examine the
154 sectors (1K) and then examine the bootup sector size.
245 sector (always one 512-byte sector) plus the setup code.
445 The unit is bytes starting with the beginning of the boot sector.
[all …]
/linux-4.1.27/drivers/ide/
Dide-cd.c148 unsigned long sector; in cdrom_analyze_sense_data() local
181 sector = (sense->information[0] << 24) | in cdrom_analyze_sense_data()
188 sector <<= 2; in cdrom_analyze_sense_data()
191 sector &= ~(bio_sectors - 1); in cdrom_analyze_sense_data()
201 if (sector < get_capacity(info->disk) && in cdrom_analyze_sense_data()
202 drive->probed_capacity - sector < 4 * 75) in cdrom_analyze_sense_data()
203 set_capacity(info->disk, sector); in cdrom_analyze_sense_data()
Dide-gd.c160 struct request *rq, sector_t sector) argument
162 return drive->disk_ops->do_request(drive, rq, sector);
Dide-floppy.c192 unsigned long sector) in idefloppy_create_rw_cmd() argument
195 int block = sector / floppy->bs_factor; in idefloppy_create_rw_cmd()
Dide-taskfile.c554 if (req_task->out_flags.b.sector) in ide_taskfile_ioctl()
/linux-4.1.27/include/linux/mtd/
Ddoc2000.h218 int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]);
/linux-4.1.27/fs/udf/
Dsuper.c695 loff_t sector = VSD_FIRST_SECTOR_OFFSET; in udf_check_vsd() local
708 sector += (sbi->s_session << sb->s_blocksize_bits); in udf_check_vsd()
711 (unsigned int)(sector >> sb->s_blocksize_bits), in udf_check_vsd()
723 for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET; in udf_check_vsd()
724 sector += sectorsize) { in udf_check_vsd()
726 bh = udf_tread(sb, sector >> sb->s_blocksize_bits); in udf_check_vsd()
732 (sector & (sb->s_blocksize - 1))); in udf_check_vsd()
766 nsr02 = sector; in udf_check_vsd()
769 nsr03 = sector; in udf_check_vsd()
788 else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) == in udf_check_vsd()
/linux-4.1.27/drivers/infiniband/ulp/iser/
Discsi_iser.c401 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) in iscsi_iser_check_protection() argument
407 sector); in iscsi_iser_check_protection()
410 sector); in iscsi_iser_check_protection()
Diser_verbs.c1229 enum iser_data_dir cmd_dir, sector_t *sector) in iser_check_task_pi_status() argument
1250 *sector = scsi_get_lba(iser_task->sc) + sector_off; in iser_check_task_pi_status()
1255 (unsigned long long)*sector, in iser_check_task_pi_status()
Discsi_iser.h640 enum iser_data_dir cmd_dir, sector_t *sector);
/linux-4.1.27/drivers/block/mtip32xx/
Dmtip32xx.h226 unsigned char sector; member
Dmtip32xx.c1803 fis.sector = command[3]; in exec_drive_task()
1895 fis.sector = command[1]; in exec_drive_command()
/linux-4.1.27/fs/btrfs/
Dextent_io.c2033 u64 sector; in repair_io_failure() local
2058 sector = bbio->stripes[mirror_num-1].physical >> 9; in repair_io_failure()
2059 bio->bi_iter.bi_sector = sector; in repair_io_failure()
2079 rcu_str_deref(dev->name), sector); in repair_io_failure()
2790 struct page *page, sector_t sector, in submit_extent_page() argument
2812 contig = bio->bi_iter.bi_sector == sector; in submit_extent_page()
2814 contig = bio_end_sector(bio) == sector; in submit_extent_page()
2836 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); in submit_extent_page()
2923 sector_t sector; in __do_readpage() local
3002 sector = em->block_start >> 9; in __do_readpage()
[all …]
Dscrub.c237 sector_t sector; member
590 (unsigned long long)swarn->sector, root, inum, offset, in scrub_print_warning_inode()
602 (unsigned long long)swarn->sector, root, inum, offset, ret); in scrub_print_warning_inode()
633 swarn.sector = (sblock->pagev[0]->physical) >> 9; in scrub_print_warning()
660 (unsigned long long)swarn.sector, in scrub_print_warning()
Dvolumes.c5721 sector_t sector) in bio_size_ok() argument
5728 .bi_sector = sector, in bio_size_ok()
/linux-4.1.27/drivers/block/zram/
Dzram_drv.c1016 static int zram_rw_page(struct block_device *bdev, sector_t sector, in zram_rw_page() argument
1028 if (!valid_io_request(zram, sector, PAGE_SIZE)) { in zram_rw_page()
1034 index = sector >> SECTORS_PER_PAGE_SHIFT; in zram_rw_page()
1035 offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT; in zram_rw_page()
/linux-4.1.27/drivers/scsi/be2iscsi/
Dbe_mgmt.c492 unsigned short region, sector_size, sector, offset; in mgmt_vendor_specific_fw_cmd() local
499 sector = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; in mgmt_vendor_specific_fw_cmd()
502 req->sector = sector; in mgmt_vendor_specific_fw_cmd()
508 offset = sector * sector_size + offset; in mgmt_vendor_specific_fw_cmd()
Dbe_mgmt.h228 unsigned short sector; member
/linux-4.1.27/arch/x86/boot/
Dheader.S274 # header, from the old boot sector.
/linux-4.1.27/Documentation/
Dprintk-formats.txt21 printk("test: sector number/total blocks: %llu/%llu\n",
22 (unsigned long long)sector, (unsigned long long)blockcount);
Diostats.txt145 the partition which contains the first sector of the request after the
Dbcache.txt11 extants (which can be anywhere from a single sector to the bucket size). It's
405 Minimum granularity of writes - should match hardware sector size.
Dmd.txt205 This is either "none" or a sector number within the devices of
459 "sector length" to this file adds new acknowledged (i.e.
Dkernel-parameters.txt3797 device capacity by one sector);
3808 sector if the number is odd);
/linux-4.1.27/Documentation/ioctl/
Dhdio.txt31 HDIO_GET_ADDRESS get sector addressing mode
102 start starting sector of this partition.
114 purely by sector number nowadays (lba addressing), and the
662 SECTOR If out_flags.b.sector is set
/linux-4.1.27/fs/xfs/
Dxfs_buf.c1142 sector_t sector = bp->b_maps[map].bm_bn; in xfs_buf_ioapply_map() local
1172 bio->bi_iter.bi_sector = sector; in xfs_buf_ioapply_map()
1189 sector += BTOBB(nbytes); in xfs_buf_ioapply_map()
/linux-4.1.27/Documentation/ide/
DChangeLog.ide-cd.1994-200433 * Fix a bug in handling the sector cache which
266 * 4.61 Jan 22, 2004 - support hardware sector sizes other than 2kB,
/linux-4.1.27/include/scsi/
Dscsi_transport_iscsi.h170 u8 (*check_protection)(struct iscsi_task *task, sector_t *sector);
/linux-4.1.27/tools/lguest/
Dlguest.c2969 off = out.sector * 512; in blk_request()
2979 err(1, "Bad seek to sector %llu", out.sector); in blk_request()
2982 verbose("WRITE to sector %llu: %i\n", out.sector, ret); in blk_request()
3012 err(1, "Bad seek to sector %llu", out.sector); in blk_request()
/linux-4.1.27/Documentation/arm/
DInterrupts35 SMC9196 interrupts until it has finished transferring its multi-sector
/linux-4.1.27/Documentation/scsi/
Daha152x.txt107 The BIOS uses a cylinder/head/sector addressing scheme (C/H/S)
DChangeLog.1992-1997428 Flag device as needing sector size instead.
617 * sd.c: Add support for sd_hardsizes (hard sector sizes).
1008 get an unsupported sector size device.
1623 drive did not return sensible sector size.
1898 instead of INIT_REQUEST. Allow sector sizes of 1024 and 256. For
1905 to 1. Use INIT_SCSI_REQUEST. Allow 512 byte sector sizes. Do a
Dscsi_mid_low_api.txt740 bios_param - fetch head, sector, cylinder info for a disk
760 * bios_param - fetch head, sector, cylinder info for a disk
/linux-4.1.27/fs/gfs2/
Dops_fstype.c227 static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) in gfs2_read_super() argument
243 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); in gfs2_read_super()
/linux-4.1.27/drivers/net/wireless/ath/wil6210/
Dwmi.h398 __le32 sector; member
/linux-4.1.27/block/partitions/
DKconfig131 first sector a new partition table in BSD disklabel format. Saying Y
/linux-4.1.27/fs/ocfs2/
Daops.c689 sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) + in ocfs2_direct_IO_zero_extend() local
692 ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, in ocfs2_direct_IO_zero_extend()
/linux-4.1.27/arch/cris/arch-v32/drivers/
DKconfig125 int "Byte-offset of partition table sector"
/linux-4.1.27/fs/befs/
DChangeLog244 * Fixed a problem with reading the superblock on devices with large sector
/linux-4.1.27/arch/cris/arch-v10/drivers/
DKconfig421 int "Byte-offset of partition table sector"
/linux-4.1.27/drivers/mtd/onenand/
Donenand_base.c230 static int onenand_page_address(int page, int sector) in onenand_page_address() argument
236 fsa = sector & ONENAND_FSA_MASK; in onenand_page_address()
/linux-4.1.27/Documentation/mic/mpssd/
Dmpssd.c1223 (hdr.sector * SECTOR_SIZE); in virtio_block()
/linux-4.1.27/Documentation/mtd/
Dnand_ecc.txt52 This figure represents a sector of 256 bytes.
/linux-4.1.27/Documentation/netlabel/
Ddraft-ietf-cipso-ipsecurity-01.txt49 for use in a variety of government and civil sector environments.
/linux-4.1.27/arch/mips/include/asm/octeon/
Dcvmx-mio-defs.h1096 uint64_t sector:1; member
1112 uint64_t sector:1;
/linux-4.1.27/
DCREDITS1025 D: Boot sector "..." printing