/linux-4.1.27/crypto/async_tx/

async_raid6_recov.c
   160  __2data_recov_4(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit)
   172      p = blocks[disks-2];
   173      q = blocks[disks-1];
   175      a = blocks[faila];
   176      b = blocks[failb];
   199  __2data_recov_5(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit)
   215      if (blocks[i] == NULL)
   224      p = blocks[disks-2];
   225      q = blocks[disks-1];
   226      g = blocks[good];
   232      dp = blocks[faila];
   233      dq = blocks[failb];
   273  __2data_recov_n(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit)
   285      p = blocks[disks-2];
   286      q = blocks[disks-1];
   292      dp = blocks[faila];
   293      blocks[faila] = NULL;
   294      blocks[disks-2] = dp;
   295      dq = blocks[failb];
   296      blocks[failb] = NULL;
   297      blocks[disks-1] = dq;
   300      tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
   303      blocks[faila] = dp;
   304      blocks[failb] = dq;
   305      blocks[disks-2] = p;
   306      blocks[disks-1] = q;
   341  * async_raid6_2data_recov - asynchronously calculate two missing data blocks
   346  * @blocks: array of source pointers where the last two entries are p and q
   350  async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, struct async_submit_ctl *submit)
   365      * preserve the content of 'blocks' as the caller intended.
   368      void **ptrs = scribble ? scribble : (void **) blocks;
   372      if (blocks[i] == NULL)
   375      ptrs[i] = page_address(blocks[i]);
   386      if (blocks[i])
   400      return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
   407      return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
   409      return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
   419  * @blocks: array of source pointers where the last two entries are p and q
   423  async_raid6_datap_recov(int disks, size_t bytes, int faila, struct page **blocks, struct async_submit_ctl *submit)
   441      * preserve the content of 'blocks' as the caller intended.
   444      void **ptrs = scribble ? scribble : (void **) blocks;
   448      if (blocks[i] == NULL)
   451      ptrs[i] = page_address(blocks[i]);
   465      if (blocks[i]) {
   474      p = blocks[disks-2];
   475      q = blocks[disks-1];
   480      dq = blocks[faila];
   481      blocks[faila] = NULL;
   482      blocks[disks-1] = dq;
   488      struct page *g = blocks[good];
   500      tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
   504      blocks[faila] = dq;
   505      blocks[disks-1] = q;

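Note: the pointer shuffle visible at lines 292-306 and 480-482 works because Q is a Reed-Solomon syndrome over GF(2^8): regenerating the syndrome with the failed slots NULLed out yields partial sums, and the missing data falls out with one field multiply. A hedged standalone sketch of the single-failed-data-disk ("datap") case, assuming the kernel's RAID-6 field (generator g = 2, polynomial 0x11d); flat byte buffers stand in for struct page, and the gf helpers stand in for the kernel's precomputed raid6 tables:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* GF(2^8) with the RAID-6 polynomial 0x11d (shift-and-add multiply;
 * the kernel uses precomputed raid6_gfmul/raid6_gfexp tables). */
static uint8_t gfmul(uint8_t a, uint8_t b)
{
	uint8_t r = 0;
	while (b) {
		if (b & 1)
			r ^= a;
		a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
		b >>= 1;
	}
	return r;
}

static uint8_t gfexp(int n)		/* g^n with g = 2 */
{
	uint8_t r = 1;
	while (n--)
		r = gfmul(r, 2);
	return r;
}

static uint8_t gfinv(uint8_t a)		/* a^254 == a^-1 in GF(2^8) */
{
	uint8_t r = 1;
	for (int i = 0; i < 254; i++)
		r = gfmul(r, a);
	return r;
}

/* Recover data disk 'faila' plus P from Q and the surviving data.
 * data[d][i] are the data blocks, q[i] the Q syndrome. */
static void datap_recov(int ndata, size_t bytes, int faila,
			uint8_t **data, uint8_t *p, const uint8_t *q)
{
	uint8_t coef = gfinv(gfexp(faila));

	for (size_t i = 0; i < bytes; i++) {
		uint8_t qx = 0, px = 0;

		for (int d = 0; d < ndata; d++)
			if (d != faila)
				qx ^= gfmul(gfexp(d), data[d][i]);
		/* Q = sum(g^d * D_d)  =>  D_f = (Q ^ Qx) * g^-f */
		data[faila][i] = gfmul(q[i] ^ qx, coef);
		for (int d = 0; d < ndata; d++)
			px ^= data[d][i];
		p[i] = px;		/* P is plain XOR parity */
	}
}

int main(void)
{
	enum { N = 3, B = 4 };
	uint8_t d0[B] = "abc", d1[B] = "def", d2[B] = "ghi";
	uint8_t *data[N] = { d0, d1, d2 }, p[B], q[B] = { 0 };

	for (size_t i = 0; i < B; i++)		/* build Q honestly */
		for (int d = 0; d < N; d++)
			q[i] ^= gfmul(gfexp(d), data[d][i]);
	memset(d1, 0, B);			/* "lose" disk 1 */
	datap_recov(N, B, 1, data, p, q);
	printf("recovered: %.3s\n", (char *)d1);	/* prints "def" */
	return 0;
}
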
async_pq.c
    36  /* the struct page *blocks[] parameter passed to async_gen_syndrome()
    38   * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
   122  do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, ...)
   132      srcs = (void **) blocks;
   135      if (blocks[i] == NULL) {
   139      srcs[i] = page_address(blocks[i]) + offset;
   158  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
   160  * @disks: number of blocks (including missing P or Q, see below)
   168  * both) from the calculation by setting blocks[disks-2] or
   169  * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
   172  * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
   178  async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, ...)
   183      &P(blocks, disks), 2,
   184      blocks, src_cnt, len);
   188      BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
   212      if (blocks[i] == NULL)
   214      unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
   226      if (P(blocks, disks))
   227      unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
   235      if (Q(blocks, disks))
   236      unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
   256      if (!P(blocks, disks)) {
   257      P(blocks, disks) = pq_scribble_page;
   260      if (!Q(blocks, disks)) {
   261      Q(blocks, disks) = pq_scribble_page;
   264      do_sync_gen_syndrome(blocks, offset, disks, len, submit);
   271  pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
   276      return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
   282  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
   284  * @disks: number of blocks (including missing P or Q, see below)
   290  * The same notes from async_gen_syndrome apply to the 'blocks',
   296  async_syndrome_val(struct page **blocks, unsigned int offset, int disks, ...)
   300      struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
   323      if (likely(blocks[i])) {
   324      unmap->addr[j] = dma_map_page(dev, blocks[i],
   333      if (!P(blocks, disks)) {
   337      pq[0] = dma_map_page(dev, P(blocks, disks),
   343      if (!Q(blocks, disks)) {
   347      pq[1] = dma_map_page(dev, Q(blocks, disks),
   374      struct page *p_src = P(blocks, disks);
   375      struct page *q_src = Q(blocks, disks);
   401      tx = async_xor(spare, blocks, offset, disks-2, len, submit);
   409      P(blocks, disks) = NULL;
   410      Q(blocks, disks) = spare;
   412      tx = async_gen_syndrome(blocks, offset, disks, len, submit);
   420      P(blocks, disks) = p_src;
   421      Q(blocks, disks) = q_src;

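Note: the @blocks layout documented above (data at indices 0..disks-3, P at disks-2, Q at disks-1) is what do_sync_gen_syndrome() ultimately computes over. A hedged plain-C sketch of that computation, assuming the RAID-6 field GF(2^8) with generator 2 and polynomial 0x11d; byte buffers stand in for struct page:

#include <stddef.h>
#include <stdint.h>

/* multiply by the generator g = 2 in GF(2^8), polynomial 0x11d */
static uint8_t gf2x(uint8_t a)
{
	return (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
}

/* P/Q over the async_gen_syndrome() layout: blocks[0..disks-3] are
 * sources, blocks[disks-2] receives P (plain XOR parity) and
 * blocks[disks-1] receives Q = sum over d of g^d * D_d, evaluated
 * here with Horner's rule. */
static void gen_syndrome(uint8_t **blocks, int disks, size_t len)
{
	uint8_t *p = blocks[disks - 2];
	uint8_t *q = blocks[disks - 1];

	for (size_t i = 0; i < len; i++) {
		uint8_t pv = 0, qv = 0;

		for (int d = disks - 3; d >= 0; d--) {
			qv = gf2x(qv) ^ blocks[d][i];
			pv ^= blocks[d][i];
		}
		p[i] = pv;
		q[i] = qv;
	}
}

The NULL-source conventions in the listing (skipping blocks[i] == NULL, or substituting pq_scribble_page when P or Q is omitted) are bookkeeping layered on top of this same arithmetic.
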
/linux-4.1.27/include/linux/

dqblk_v1.h
     8  /* Numbers of blocks needed for updates */

dqblk_v2.h
    10  /* Numbers of blocks needed for updates */

dqblk_qtree.h
    10  /* Numbers of blocks needed for updates - we count with the smallest
    30  unsigned int dqi_blocks;    /* # of blocks in quota file */
    31  unsigned int dqi_free_blk;  /* First block in list of free blocks */

sysv_fs.h
    21  #define SYSV_BADBL_INO 1  /* inode of bad blocks file */
    32  __fs16 s_nfree;  /* number of free blocks in s_free, <= XENIX_NICFREE */
    51  __fs32 s_type;   /* type of file system: 1 for 512 byte blocks,
                            2 for 1024 byte blocks, 3 for 2048 byte blocks */
    71  __fs16 s_nfree;  /* number of free blocks in s_free, <= SYSV_NICFREE */
    93  __fs32 s_type;   /* type of file system: 1 for 512 byte blocks,
                            2 for 1024 byte blocks */
   102  __fs16 s_nfree;  /* number of free blocks in s_free, <= SYSV_NICFREE */
   121  __fs32 s_type;   /* type of file system: 1 for 512 byte blocks,
                            2 for 1024 byte blocks */
   132  __fs16 s_nfree;  /* number of free blocks in s_free, <= V7_NICFREE */
   170  __fs16 s_nfree;  /* number of free blocks in s_free, <= COH_NICFREE */

stat.h
    34  unsigned long long blocks;   (member of struct kstat)

cpu_pm.h
    26  * power domain, the contents of some blocks (floating point coprocessors,
    29  * and hotplug implementations to notify the drivers for these blocks that
    38  * CPU. They are used to save per-cpu context for affected blocks.
    41  * are used to save any global context for affected blocks, and must be called

task_io_accounting_ops.h
    16, 30  * We approximate number of blocks, because we account bytes only.

amifd.h
    50  int blocks;  /* total # blocks on disk */   (member of struct amiga_floppy_struct)

nilfs2_fs.h
    50  * @i_blocks: blocks count
   163  /*30*/ __le32 s_blocks_per_segment;  /* number of blocks per full segment */
   169  /*50*/ __le64 s_free_blocks_count;  /* Free blocks count */
   183  __le16 s_def_resuid;  /* Default uid for reserved blocks */
   184  __le16 s_def_resgid;  /* Default gid for reserved blocks */
   250  #define NILFS_SEG_MIN_BLOCKS  16  /* Minimum number of blocks in
   252  #define NILFS_PSEG_MIN_BLOCKS  2  /* Minimum number of blocks in
   356  * @fi_nblocks: number of blocks (including intermediate blocks)
   357  * @fi_ndatablk: number of file data blocks
   409  * @ss_nblocks: number of blocks
   506  * @cp_nblk_inc: number of blocks incremented by this checkpoint
   508  * @cp_blocks_count: blocks count
   567  * @ci_nblk_inc: number of blocks incremented by this checkpoint
   569  * @ci_blocks_count: blocks count
   615  * @su_nblocks: number of blocks in segment
   695  * @sui_nblocks: number of written blocks in segment
   875  * @bd_oblocknr: disk block address (for skipping dead blocks)

f2fs_fs.h
    63  __le32 log_blocks_per_seg;  /* log2 # of blocks per segment */
    67  __le64 block_count;         /* total # of user blocks */
   105  __le64 user_block_count;    /* # of user blocks */
   106  __le64 valid_block_count;   /* # of valid blocks in main area */
   146  __le16 blk_count;           /* Number of orphan inode blocks in CP */
   194  __le64 i_blocks;            /* file size in blocks */
   212  __le32 i_addr[DEF_ADDRS_PER_INODE];  /* Pointers to data blocks */
   272  * there-in blocks should occupy 64 bytes, 512 bits.
   293  __u8 valid_map[SIT_VBLOCK_MAP_SIZE];  /* bitmap for valid blocks */
   354  * summary blocks

/linux-4.1.27/fs/omfs/

omfs_fs.h
    26  __be64 s_num_blocks;     /* total number of FS blocks */
    29  __be32 s_mirrors;        /* # of mirrors of system blocks */
    30  __be32 s_sys_blocksize;  /* size of non-data blocks */
    48  __be64 r_num_blocks;     /* total number of FS blocks */
    52  __be32 r_clustersize;    /* size allocated for data blocks */
    53  __be64 r_mirrors;        /* # of mirrors of system blocks */
    71  __be64 e_cluster;        /* start location of a set of blocks */
    72  __be64 e_blocks;         /* number of blocks after e_cluster */

bitmap.c
   124  * Tries to allocate a set of blocks.  The request size depends on the
   125  * type: for inodes, we must allocate sbi->s_mirrors blocks, and for file
   126  * blocks, we try to allocate sbi->s_clustersize, but can always get away

/linux-4.1.27/arch/m68k/emu/

nfblock.c
    41  nfhd_get_capacity(u32 major, u32 minor, u32 *blocks, u32 *blocksize)
    45      virt_to_phys(blocks), virt_to_phys(blocksize));
    56  u32 blocks, bsize;   (members of struct nfhd_device)
    86      geo->cylinders = dev->blocks >> (6 - dev->bshift);   (nfhd_getgeo)
    98  nfhd_init_one(int id, u32 blocks, u32 bsize)
   103      pr_info("nfhd%u: found device with %u blocks (%u bytes)\n", dev_id,
   104          blocks, bsize);
   116      dev->blocks = blocks;
   137      set_capacity(dev->disk, (sector_t)blocks * (bsize / 512));
   156  u32 blocks, bsize;   (local in nfhd_init)
   170      if (nfhd_get_capacity(i, 0, &blocks, &bsize))
   172      nfhd_init_one(i, blocks, bsize);

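Note: the set_capacity() call at line 137 converts device blocks to the 512-byte sectors the block layer counts in. A small illustrative calculation (values are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a device with 1000 blocks of 4096 bytes is advertised as
	 * 1000 * (4096 / 512) = 8000 sectors of 512 bytes */
	uint32_t blocks = 1000, bsize = 4096;
	uint64_t sectors = (uint64_t)blocks * (bsize / 512);

	printf("%u blocks x %u bytes = %llu sectors\n",
	       blocks, bsize, (unsigned long long)sectors);
	return 0;
}
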
/linux-4.1.27/block/partitions/

sgi.c
    22  __be32 num_blocks;  /* Size in logical blocks */
    35  unsigned int start, blocks;   (local in sgi_partition)
    70      blocks = be32_to_cpu(p->num_blocks);
    72      if (blocks) {
    73      put_partition(state, slot, start, blocks);

mac.h
    13  __be32 map_count;    /* # blocks in partition map */
    15  __be32 block_count;  /* number of blocks in partition */
    19  __be32 data_count;   /* number of data blocks */

atari.h
    29  u32 hd_siz;  /* size of disk in blocks */

/linux-4.1.27/include/uapi/linux/

efs_fs_sb.h
    39  __be32 fs_tfree;      /* total free data blocks */
    53  __u32 total_blocks;   /* total number of blocks in filesystem */
    54  __u32 group_size;     /* # of blocks a group consists of */
    55  __u32 data_free;      /* # of free data blocks */
    57  __u16 inode_blocks;   /* # of blocks used for inodes in every grp */

gfs2_ondisk.h
   102  /* Address of superblock in GFS2 basic blocks */
   142  __be32 ri_length;  /* length of rgrp header in fs blocks */
   146  __be32 ri_data;    /* num of data blocks in rgrp */
   157  /* Number of blocks per byte in rgrp */
   259  __be64 di_blocks;  /* number of blocks in file */
   351  * The blocks containing the values and the blocks containing the
   355  * or a single indirect block pointing to blocks containing the
   359  * so the number of blocks required depends upon block size. Since the
   362  * number of blocks that an inode may have relating to extended attributes.
   410  /* ld_data1 is the number of metadata blocks in the descriptor.
   414  /* ld_data1 is the number of revoke blocks in the descriptor.
   418  /* ld_data1 is the number of data blocks in the descriptor.
   448  * blocks.
   474  __be64 qb_limit;   /* Hard limit of # blocks to alloc */
   476  __be64 qb_value;   /* Current # blocks allocated */

hpet.h
    23  #define MAX_HPET_TBS 8  /* maximum hpet timer blocks */

virtio_blk.h
    76  /* alignment offset in logical blocks. */
    78  /* minimum I/O size without performance penalty in logical blocks. */
    80  /* optimal sustained I/O size in logical blocks. */

/linux-4.1.27/arch/arm64/crypto/

aes-glue.c
    58, 60  int rounds, int blocks, int first);
    63, 65  int rounds, int blocks, u8 iv[], int first);
    68      int rounds, int blocks, u8 ctr[], int first);
    71, 74  int rounds, int blocks, u8 const rk2[], u8 iv[],
  in ecb_encrypt():
   105      unsigned int blocks;
   112      for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
   114          (u8 *)ctx->key_enc, rounds, blocks, first);
  in ecb_decrypt():
   127      unsigned int blocks;
   134      for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
   136          (u8 *)ctx->key_dec, rounds, blocks, first);
  in cbc_encrypt():
   149      unsigned int blocks;
   156      for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
   158          (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
  in cbc_decrypt():
   172      unsigned int blocks;
   179      for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
   181          (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
  in ctr_encrypt():
   195      int blocks;
   203      while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
   205          (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
   208          nbytes -= blocks * AES_BLOCK_SIZE;
   215          u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
   216          u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
   223          blocks = (nbytes <= 8) ? -1 : 1;
   226          blocks, walk.iv, first);
  in xts_encrypt():
   241      unsigned int blocks;
   248      for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
   250          (u8 *)ctx->key1.key_enc, rounds, blocks,
  in xts_decrypt():
   265      unsigned int blocks;
   272      for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
   274          (u8 *)ctx->key1.key_dec, rounds, blocks,

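Note: every mode above follows the same glue-code shape: take a chunk from the walk, compute blocks = nbytes / AES_BLOCK_SIZE, hand that whole-block count to the assembly core, and treat any partial tail separately (CTR can encrypt it; ECB/CBC require block-aligned input). A hedged generic sketch of that shape, with a trivial XOR standing in for the real per-block cipher so it stays runnable:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 16

/* stand-in for the asm routines (aes_ecb_encrypt etc.) */
static void cipher_block(uint8_t *dst, const uint8_t *src,
			 const uint8_t *key)
{
	for (int i = 0; i < BLOCK_SIZE; i++)
		dst[i] = src[i] ^ key[i];
}

static void process(uint8_t *dst, const uint8_t *src, size_t nbytes,
		    const uint8_t *key)
{
	size_t blocks = nbytes / BLOCK_SIZE;	/* whole blocks first */

	while (blocks--) {
		cipher_block(dst, src, key);
		src += BLOCK_SIZE;
		dst += BLOCK_SIZE;
	}
	if (nbytes % BLOCK_SIZE)		/* tail is mode-specific */
		memcpy(dst, src, nbytes % BLOCK_SIZE);
}
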
aes-modes.S
    30  * - encrypt_block2x - encrypt 2 blocks in parallel (if INTERLEAVE == 2)
    31  * - decrypt_block2x - decrypt 2 blocks in parallel (if INTERLEAVE == 2)
    32  * - encrypt_block4x - encrypt 4 blocks in parallel (if INTERLEAVE == 4)
    33  * - decrypt_block4x - decrypt 4 blocks in parallel (if INTERLEAVE == 4)
   108, 110  * int blocks, int first)
   124  ld1 {v0.16b-v1.16b}, [x1], #32   /* get 2 pt blocks */
   128  ld1 {v0.16b-v3.16b}, [x1], #64   /* get 4 pt blocks */
   160  ld1 {v0.16b-v1.16b}, [x1], #32   /* get 2 ct blocks */
   164  ld1 {v0.16b-v3.16b}, [x1], #64   /* get 4 ct blocks */
   187, 189  * int blocks, u8 iv[], int first)
   221  ld1 {v0.16b-v1.16b}, [x1], #32   /* get 2 ct blocks */
   230  ld1 {v0.16b-v3.16b}, [x1], #64   /* get 4 ct blocks */
   265  * int blocks, u8 ctr[], int first)
   301  ld1 {v2.16b-v3.16b}, [x1], #32   /* get 2 input blocks */
   318  ld1 {v5.16b-v7.16b}, [x1], #48   /* get 3 input blocks */
   346  bmi .Lctrhalfblock               /* blocks < 0 means 1/2 block */
   375, 377  * int blocks, u8 const rk2[], u8 iv[], int first)
   410  ld1 {v0.16b-v1.16b}, [x1], #32   /* get 2 pt blocks */
   425  ld1 {v0.16b-v3.16b}, [x1], #64   /* get 4 pt blocks */
   482  ld1 {v0.16b-v1.16b}, [x1], #32   /* get 2 ct blocks */
   497  ld1 {v0.16b-v3.16b}, [x1], #64   /* get 4 ct blocks */

ghash-ce-glue.c
    36  asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
  in ghash_update():
    57      int blocks;
    67      blocks = len / GHASH_BLOCK_SIZE;
    71      pmull_ghash_update(blocks, ctx->digest, src, key,
    74      src += blocks * GHASH_BLOCK_SIZE;

/linux-4.1.27/arch/arm/crypto/

aes-ce-glue.c
    28, 30  int rounds, int blocks);
    33, 35  int rounds, int blocks, u8 iv[]);
    38      int rounds, int blocks, u8 ctr[]);
    41, 44  int rounds, int blocks, u8 iv[],
  in ecb_encrypt():
   171      unsigned int blocks;
   179      while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
   181          (u8 *)ctx->key_enc, num_rounds(ctx), blocks);
  in ecb_decrypt():
   194      unsigned int blocks;
   202      while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
   204          (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
  in cbc_encrypt():
   217      unsigned int blocks;
   225      while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
   227          (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
  in cbc_decrypt():
   241      unsigned int blocks;
   249      while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
   251          (u8 *)ctx->key_dec, num_rounds(ctx), blocks,
  in ctr_encrypt():
   265      int err, blocks;
   272      while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
   274          (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
   276          nbytes -= blocks * AES_BLOCK_SIZE;
   283          u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
   284          u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
   291          blocks = (nbytes <= 8) ? -1 : 1;
   294          num_rounds(ctx), blocks, walk.iv);
  in xts_encrypt():
   309      unsigned int blocks;
   316      for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
   318          (u8 *)ctx->key1.key_enc, rounds, blocks,
  in xts_decrypt():
   334      unsigned int blocks;
   341      for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
   343          (u8 *)ctx->key1.key_dec, rounds, blocks,

aesbs-glue.c
    33  asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
  in aesbs_cbc_encrypt():
   116      u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
   127      } while (--blocks);
   138      } while (--blocks);
  in aesbs_cbc_decrypt():
   164      u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
   172      memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
   178      iv = bk[blocks & 1];
   184      } while (--blocks);
  in aesbs_ctr_encrypt():
   209      u32 blocks;
   215      while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
   221          if (unlikely(headroom < blocks)) {
   222              blocks = headroom + 1;
   223              tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
   227          walk.dst.virt.addr, blocks,
   230          inc_be128_ctr(ctr, blocks);
   232          nbytes -= blocks * AES_BLOCK_SIZE;
   239          u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
   240          u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

aes-ce-core.S
   162, 164  * int blocks)
   220, 222  * int blocks, u8 iv[])
   280  * int blocks, u8 ctr[])
   322  bmi .Lctrhalfblock              @ blocks < 0 means 1/2 block
   359, 361  * int blocks, u8 iv[], u8 const rk2[], int first)
   409  vld1.8 {q0-q1}, [r1, :64]!      @ get 3 pt blocks
   420  vst1.8 {q0-q1}, [r0, :64]!      @ write 3 ct blocks
   460  vld1.8 {q0-q1}, [r1, :64]!      @ get 3 ct blocks
   471  vst1.8 {q0-q1}, [r0, :64]!      @ write 3 pt blocks

ghash-ce-glue.c
    43  asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
  in ghash_update():
    64      int blocks;
    74      blocks = len / GHASH_BLOCK_SIZE;
    78      pmull_ghash_update(blocks, ctx->digest, src, key,
    81      src += blocks * GHASH_BLOCK_SIZE;

/linux-4.1.27/include/crypto/

sha1_base.h
    18  typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
  in sha1_base_do_update():
    45      int blocks;
    57      blocks = len / SHA1_BLOCK_SIZE;
    60      if (blocks) {
    61          block_fn(sctx, data, blocks);
    62          data += blocks * SHA1_BLOCK_SIZE;

sha256_base.h
    19  int blocks);
  in sha256_base_do_update():
    66      int blocks;
    78      blocks = len / SHA256_BLOCK_SIZE;
    81      if (blocks) {
    82          block_fn(sctx, data, blocks);
    83          data += blocks * SHA256_BLOCK_SIZE;

sha512_base.h
    19  int blocks);
  in sha512_base_do_update():
    68      int blocks;
    80      blocks = len / SHA512_BLOCK_SIZE;
    83      if (blocks) {
    84          block_fn(sctx, data, blocks);
    85          data += blocks * SHA512_BLOCK_SIZE;

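Note: all three headers implement the same pattern, buffer a partial block across calls, then hand whole blocks to the block function in one batch. A hedged standalone sketch of that pattern (generic names; the real helpers also track sctx->count and derive the intra-block offset from it):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLK 64				/* SHA-1/SHA-256 block size */

struct hash_state {
	uint8_t buf[BLK];		/* partial block carried over */
	size_t partial;			/* bytes currently in buf */
};

typedef void (block_fn)(struct hash_state *st, const uint8_t *src, int blocks);

static void do_update(struct hash_state *st, const uint8_t *data,
		      size_t len, block_fn *fn)
{
	if (st->partial) {		/* top up the buffered block first */
		size_t n = BLK - st->partial;

		if (n > len)
			n = len;
		memcpy(st->buf + st->partial, data, n);
		st->partial += n;
		data += n;
		len -= n;
		if (st->partial < BLK)
			return;		/* still not a full block */
		fn(st, st->buf, 1);
		st->partial = 0;
	}
	if (len >= BLK) {		/* whole blocks straight from input */
		size_t blocks = len / BLK;

		fn(st, data, (int)blocks);
		data += blocks * BLK;
		len -= blocks * BLK;
	}
	memcpy(st->buf, data, len);	/* stash the tail for next time */
	st->partial = len;
}
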
/linux-4.1.27/arch/x86/crypto/

cast5-avx-x86_64-asm_64.S
   219-226  input:  RL1: blocks 1 and 2,   RR1: blocks 3 and 4,
                    RL2: blocks 5 and 6,   RR2: blocks 7 and 8,
                    RL3: blocks 9 and 10,  RR3: blocks 11 and 12,
                    RL4: blocks 13 and 14, RR4: blocks 15 and 16
   228-235  output: RL1..RR4 hold the same 16 blocks, encrypted
   291-298  input:  RL1..RR4 hold encrypted blocks 1..16
   300-307  output: RL1..RR4 hold the same 16 blocks, decrypted

camellia-aesni-avx-asm_64.S
   480  /* load blocks to registers and apply pre-whitening */
   503  /* byteslice pre-whitened blocks and store to temporary memory */
   526  /* de-byteslice, apply post-whitening and store blocks */
   724  * %xmm0..%xmm15: 16 plaintext blocks
   726  * %xmm0..%xmm15: 16 encrypted blocks, order swapped:
   810  * %xmm0..%xmm15: 16 encrypted blocks
   812  * %xmm0..%xmm15: 16 plaintext blocks, order swapped:
   890-891, 913-914, 941-942, 996-997, 1109-1110, 1243-1244, 1257-1258
        * %rsi: dst (16 blocks)  /  * %rdx: src (16 blocks)

camellia-aesni-avx2-asm_64.S
   519  /* load blocks to registers and apply pre-whitening */
   542  /* byteslice pre-whitened blocks and store to temporary memory */
   565  /* de-byteslice, apply post-whitening and store blocks */
   764  * %ymm0..%ymm15: 32 plaintext blocks
   766  * %ymm0..%ymm15: 32 encrypted blocks, order swapped:
   850  * %ymm0..%ymm15: 16 encrypted blocks
   852  * %ymm0..%ymm15: 16 plaintext blocks, order swapped:
   930-931, 957-958, 989-990, 1069-1070, 1213-1214, 1358-1359, 1373-1374
        * %rsi: dst (32 blocks)  /  * %rdx: src (32 blocks)

/linux-4.1.27/fs/jfs/

jfs_extent.c
  in extAlloc():
    93      /* This blocks if we are low on resources */
   116      * extent if we can allocate the blocks immediately
   127      /* allocate the disk blocks for the extent. initially, extBalloc()
   128      * will try to allocate disk blocks for the requested size (xlen).
   129      * if this fails (xlen contiguous free blocks not available), it'll
   130      * try to allocate a smaller number of blocks (producing a smaller
   131      * extent), with this smaller number of blocks consisting of the
   132      * requested number of blocks rounded down to the next smaller
   134      * and retry the allocation until the number of blocks to allocate
   135      * is smaller than the number of blocks per page.
   143      /* Allocate blocks to quota. */
   164      * free the newly allocated blocks and return the error.
  in extRealloc():
   223      /* This blocks if we are low on resources */
   232      * number of blocks.
   248      /* try to allocated the request number of blocks for the
   251      * try to allocate a new set of blocks large enough for the
   259      /* Allocat blocks to quota. */
   321      /* perform the insert. if it fails, free the blocks
  extBalloc() kernel-doc:
   480      * FUNCTION: allocate disk blocks to form an extent.
   482      * initially, we will try to allocate disk blocks for the
   484      * contiguous free blocks not available), we'll try to allocate
   485      * a smaller number of blocks (producing a smaller extent), with
   486      * this smaller number of blocks consisting of the requested
   487      * number of blocks rounded down to the next smaller power of 2
   489      * retry the allocation until the number of blocks to allocate
   490      * is smaller than the number of blocks per page.
   497      * exit, this value is set to the number of blocks actually
   518      /* get the number of blocks to initially attempt to allocate.
   519      * we'll first try the number of blocks requested unless this
   521      * blocks in the map. in that case, we'll start off with the
   530      /* try to allocate blocks */
  extBrealloc() kernel-doc:
   575      * to a new set of blocks. If moving the extent, we initially
   576      * will try to allocate disk blocks for the requested size
   577      * (newnblks). if this fails (new contiguous free blocks not
   579      * blocks (producing a smaller extent), with this smaller
   580      * number of blocks consisting of the requested number of
   581      * blocks rounded down to the next smaller power of 2
   583      * retry the allocation until the number of blocks to allocate
   584      * is smaller than the number of blocks per page.
   589      * nblks - number of blocks within the extents current allocation.
   591      * the new desired extent size (number of blocks). on
   593      * new size (new number of blocks).
   617      * try to move the extent to a new set of blocks.
   627  * FUNCTION: round down a specified number of blocks to the next

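Note: the extBalloc() strategy documented above (ask for nblocks, and on failure retry with the request rounded down to the next smaller power of 2 until it would drop below a page's worth of blocks) is easy to see in miniature. A hedged sketch with a fake bitmap allocator standing in for dbAlloc():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCKS_PER_PAGE 8

/* stand-in for the real allocator: pretend no run longer than
 * 16 contiguous free blocks exists */
static bool alloc_contig(uint64_t want, uint64_t *got_start)
{
	if (want > 16)
		return false;
	*got_start = 1000;		/* fake block number */
	return true;
}

static int ext_balloc(uint64_t nblocks, uint64_t *start, uint64_t *len)
{
	uint64_t nb = nblocks;

	while (nb & (nb - 1))		/* round down to a power of 2 */
		nb &= nb - 1;
	for (; nb >= BLOCKS_PER_PAGE; nb >>= 1)
		if (alloc_contig(nb, start)) {
			*len = nb;
			return 0;
		}
	return -1;			/* ENOSPC */
}

int main(void)
{
	uint64_t start, len;

	if (ext_balloc(100, &start, &len) == 0)	/* tries 64, 32, 16 */
		printf("got %llu blocks at %llu\n",
		       (unsigned long long)len, (unsigned long long)start);
	return 0;
}
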
jfs_superblock.h
    40  __le64 s_size;    /* 8: aggregate size in hardware/LVM blocks;
    41                     * VFS: number of blocks */
    52  __le32 s_agsize;  /* 4: allocation group size in aggr. blocks */
    77  __le32 s_fsckloglen;  /* 4: Number of filesystem blocks reserved for
    79                         * N.B. These blocks are divided among the
    82                         * N.B. These blocks are included in the

jfs_dmap.c
   319  * the blocks will be free from the working map one dmap
   325  * nblocks - number of blocks to be freed.
   354  * TRIM the blocks, when mounted with discard option   (dbFree)
   361  * free the blocks a dmap at a time.   (dbFree)
   379  /* determine the number of blocks to be freed from   (dbFree)
   384  /* free the blocks. */   (dbFree)
   408  * the blocks will be updated in the persistent map one
   416  * nblocks - number of contiguous blocks in the range.
   438  /* the blocks better be within the mapsize. */   (dbUpdatePMap)
   443  jfs_error(ipbmap->i_sb, "blocks are outside the map\n");   (dbUpdatePMap)
   474  * the starting block. also determine how many blocks   (dbUpdatePMap)
   614  /* determine the average number of free blocks within the ags. */   (dbNextAG)
   666  * blocks from the working allocation block map.
   671  * for allocation requests smaller than the number of blocks
   672  * per dmap, we first try to allocate the new blocks
   673  * immediately following the hint. if these blocks are not
   674  * available, we try to allocate blocks near the hint. if
   675  * no blocks near the hint are available, we next try to
   678  * if no blocks are available in the dmap or the allocation
   691  * nblocks - number of contiguous blocks in the range.
   715  /* get the log2 number of blocks to be allocated.   (dbAlloc)
   716  * if the number of blocks is not a log2 multiple,   (dbAlloc)
   731  /* if the number of blocks to be allocated is greater than the   (dbAlloc)
   789  * blocks beginning at the hint.   (dbAlloc)
   814  /* next, try to satisfy the allocation request with blocks   (dbAlloc)
   827  /* try to satisfy the allocation request with blocks within   (dbAlloc)
   843  /* try to satisfy the allocation request with blocks within   (dbAlloc)
   908  * max 64 blocks will be moved.   (dbAllocExact)
   949  * number of blocks.
   953  * place by allocating the additional blocks as the blocks
   955  * blocks are not available, this routine will attempt to
   956  * allocate a new set of contiguous blocks large enough
   958  * number of blocks required.
   963  * nblocks - number of contiguous blocks within the current
   965  * addnblocks - number of blocks to add to the allocation.
   994  * new set of blocks for the entire request (i.e. try to get   (dbReAlloc)
   995  * a range of contiguous blocks large enough to cover the   (dbReAlloc)
   996  * existing allocation plus the additional blocks.)   (dbReAlloc)
  1007  * number of blocks.
  1011  * place by allocating the additional blocks as the blocks
  1017  * nblocks - number of contiguous blocks within the current
  1019  * addnblocks - number of blocks to add to the allocation.
  1063  * allocating the additional blocks as the blocks immediately   (dbExtend)
  1065  * current allocation in place if the number of additional blocks   (dbExtend)
  1088  /* try to allocate the blocks immediately following the   (dbExtend)
  1109  * FUNCTION: attempt to allocate the blocks of the specified block
  1116  * nblocks - number of contiguous free blocks of the range.
  1168  * tree will be examined to determine if the blocks are free. a   (dbAllocNext)
  1199  * if the blocks are free.   (dbAllocNext)
  1223  /* allocate the blocks.   (dbAllocNext)
  1232  * FUNCTION: attempt to allocate a number of contiguous free blocks near
  1244  * nblocks - actual number of contiguous free blocks desired.
  1245  * l2nb - log2 number of contiguous free blocks desired.
  1299  /* allocate the blocks.   (dbAllocNear)
  1315  * free blocks within the specified allocation group.
  1318  * of blocks per dmap, the dmap control pages will be used to
  1353  * nblocks - actual number of contiguous free blocks desired.
  1354  * l2nb - log2 number of contiguous free blocks desired.
  1410  printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",   (dbAllocAG)
  1497  * blocks required is less than maximum number of blocks   (dbAllocAG)
  1518  /* allocate the blocks.   (dbAllocAG)
  1523  "unable to allocate blocks\n");   (dbAllocAG)
  1542  * free blocks anywhere in the file system.
  1552  * nblocks - actual number of contiguous free blocks desired.
  1553  * l2nb - log2 number of contiguous free blocks desired.
  1578  /* allocate the blocks.   (dbAllocAny)
  1582  jfs_error(bmp->db_ipbmap->i_sb, "unable to allocate blocks\n");   (dbAllocAny)
  1592  * FUNCTION: attempt to discard (TRIM) all free blocks of specific AG
  1595  * 1) allocate blocks, as large as possible and save them
  1598  * 3) mark the blocks free again
  1614  * minlen - minimum value of contiguous blocks
  1617  * s64 - actual number of blocks trimmed
  1674  /* Trim any already allocated blocks */   (dbDiscardAG)
  1704  * contiguous free blocks large enough to satisfy an allocation
  1705  * request for the specified number of free blocks.
  1707  * if sufficient contiguous free blocks are found, this routine
  1709  * contains or starts a range of contiqious free blocks that
  1715  * l2nb - log2 number of contiguous free blocks desired.
  1718  * that contains or starts a range of contiguous free blocks.
  1738  * sufficient free blocks.   (dbFindCtl)
  1787  * the number of blocks required is greater than or equal   (dbFindCtl)
  1788  * to the maximum number of blocks described at the next   (dbFindCtl)
  1804  * blocks starting within a specific dmap.
  1812  * that is made up of the blocks of one or more dmaps. these
  1818  * group whose size is equal to the number of blocks per dmap.
  1831  * nblocks - actual number of contiguous free blocks to allocate.
  1832  * l2nb - log2 number of contiguous free blocks to allocate.
  1864  /* try to allocate the blocks.   (dbAllocCtl)
  1880  /* allocate the blocks dmap by dmap.   (dbAllocCtl)
  1903  /* determine how many blocks to allocate from this dmap.   (dbAllocCtl)
  1907  /* allocate the blocks from the dmap.   (dbAllocCtl)
  1928  * system to indicate that blocks have been leaked.   (dbAllocCtl)
  1942  * to indicate that we have leaked blocks.   (dbAllocCtl)
  1950  /* free the blocks is this dmap.   (dbAllocCtl)
  1954  * to indicate that we have leaked blocks.   (dbAllocCtl)
  1973  * FUNCTION: attempt to allocate a specified number of contiguous blocks
  1976  * this routine checks if the contiguous blocks are available.
  1977  * if so, nblocks of blocks are allocated; otherwise, ENOSPC is
  1982  * dp - pointer to dmap to attempt to allocate blocks from.
  2003  /* can't be more than a dmaps worth of blocks */   (dbAllocDmapLev)
  2025  /* allocate the blocks */   (dbAllocDmapLev)
  2039  * this routine allocates the specified blocks from the dmap
  2041  * block range causes the maximum string of free blocks within
  2052  * nblocks - number of blocks to be allocated.
  2071  /* allocate the specified (blocks) bits */   (dbAllocDmap)
  2095  * this routine frees the specified blocks from the dmap through
  2097  * causes the maximum string of free blocks within the dmap to
  2107  * nblocks - number of blocks to be freed.
  2126  /* free the specified (blocks) bits */   (dbFreeDmap)
  2142  * the freed blocks to become part of a larger binary buddy   (dbFreeDmap)
  2471  * maximum string of free blocks (i.e. a change in the root
  2473  * or deallocation of a range of blocks with a single dmap.
  2653  * must be split to, specified as the log2 number of blocks.
  2849  * the left buddy gets to claim the blocks resulting   (dbJoin)
  2953  * FUNCTION: search a dmtree_t for sufficient free blocks, returning
  2954  * the index of a leaf describing the free blocks if
  2955  * sufficient free blocks are found.
  2963  * l2nb - log2 number of free blocks to search for.
  2965  * describing at least l2nb free blocks if sufficient
  2966  * free blocks are found.
  2970  * -ENOSPC - insufficient free blocks.
  3155  * nb - number of blocks
  3158  * log2 number of blocks
  3196  * the blocks will be alloc from the working map one dmap
  3202  * nblocks - number of blocks to be freed.
  3223  * allocate the blocks a dmap at a time.   (dbAllocBottomUp)
  3241  /* determine the number of blocks to be allocated from   (dbAllocBottomUp)
  3246  /* allocate the blocks. */   (dbAllocBottomUp)
  3677  /* determine how many blocks are in the inactive allocation   (dbFinalizeBmap)
  3686  /* determine how many free blocks are in the active   (dbFinalizeBmap)
  3687  * allocation groups plus the average number of free blocks   (dbFinalizeBmap)
  3734  * for the specified number of blocks:
  3737  * The number of blocks will only account for the actually
  3738  * existing blocks. Blocks which don't actually exist in
  3743  * nblocks - number of blocks this page
  3806  * blocks) as allocated (ONES)   (dbInitDmap)
  3812  /* the first word beyond the end of existing blocks */   (dbInitDmap)
  3864  * note: leaf is set to NOFREE(-1) if all blocks of corresponding   (dbInitDmapTree)
  3890  * l2min - Number of blocks that can be covered by a leaf
  4000  * nblocks - Number of blocks in aggregate
  4002  * RETURNS: log2(allocation group size) in aggregate blocks
  4032  * FUNCTION: compute number of blocks the block allocation map file
  4035  * RETURNS: Number of blocks which can be covered by this block map file;
  4086  /* convert the number of dmaps into the number of blocks   (dbMapFileSizeToMapSize)

jfs_dmap.h
    38  #define NOFREE ((s8) -1)  /* no blocks free */
    45  * maximum l2 number of disk blocks at the various dmapctl levels.
    52  * maximum number of disk blocks at the various dmapctl levels.
    76  * describing the disk block. s is the log2(number of logical blocks per page)
    90  * dmapctl describing the disk block. s is the log2(number of logical blocks
   105  * dmapctl describing the disk block. s is the log2(number of logical blocks
   158  * dmap page per 8K blocks bitmap
   205  __le64 dn_mapsize;  /* 8: number of blocks in aggregate */
   224  s64 dn_mapsize;     /* number of blocks in aggregate */
   272  /* convert number of blocks to log2 number of blocks, rounding up to
   273   * the next log2 value if blocks is not a l2 multiple.

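Note: the comment at lines 272-273 describes a ceil(log2(nblocks)) conversion. A hedged sketch of such a helper (the function name and shape here are illustrative, not the header's actual macro):

#include <stdio.h>

/* log2 of nb, rounded up when nb is not a power of two:
 * 8 -> 3, but 9..16 -> 4 */
static int blks_to_l2(unsigned long long nb)
{
	int l2 = 0;
	unsigned long long v = 1;

	while (v < nb) {
		v <<= 1;
		l2++;
	}
	return l2;
}

int main(void)
{
	printf("%d %d %d\n", blks_to_l2(8), blks_to_l2(9), blks_to_l2(16));
	/* prints: 3 4 4 */
	return 0;
}
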
resize.c
    51  * new LVSize: in LV blocks (required)
    52  * new LogSize: in LV blocks (optional)
    53  * new FSSize: in LV blocks (optional)
  in jfs_extendfs():
   151      * convert the newLogSize to fs blocks.
   284      * s_size: aggregate size in physical blocks;
   291      * di_nblocks: number of blocks allocated for map file;
   292      * di_mapsize: number of blocks in aggregate (covered by map);
   294      * db_mapsize: number of blocks in aggregate (covered by map);
   312      /* compute number of blocks requested to extend */
   318      /* compute number of blocks that can be extended by current mapfile */
   351      * allocate new map pages and its backing blocks, and
   420      /* any more blocks to extend ? */

jfs_types.h
   148  u8 limit_hi;      /* DASD limit (in logical blocks) */
   149  __le32 limit_lo;  /* DASD limit (in logical blocks) */
   151  u8 used_hi;       /* DASD usage (in logical blocks) */
   152  __le32 used_lo;   /* DASD usage (in logical blocks) */

jfs_discard.c
    38  * nblocks - number of blocks to be trimmed
    69  * FUNCTION: attempt to discard (TRIM) all free blocks from the

/linux-4.1.27/drivers/mtd/

rfd_ftl.c
    87  struct block *blocks;   (member of struct partition)
    94  struct block *block = &part->blocks[block_no];   (build_block_map)
  in scan_header():
   187      part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
   189      if (!part->blocks)
   236      kfree(part->blocks);
  in erase_callback():
   279      if (i >= part->total_blocks || part->blocks[i].offset != erase->addr ||
   291      part->blocks[i].state = BLOCK_FAILED;
   292      part->blocks[i].free_sectors = 0;
   293      part->blocks[i].used_sectors = 0;
   302      part->blocks[i].state = BLOCK_ERASED;
   303      part->blocks[i].free_sectors = part->data_sectors_per_block;
   304      part->blocks[i].used_sectors = 0;
   305      part->blocks[i].erases++;
   307      rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
   317      part->blocks[i].offset);
   318      part->blocks[i].state = BLOCK_FAILED;
   321      part->blocks[i].state = BLOCK_OK;
  in erase_block():
   337      erase->addr = part->blocks[block].offset;
   341      part->blocks[block].state = BLOCK_ERASING;
   342      part->blocks[block].free_sectors = 0;
  in move_block_contents():
   374      rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
   383      part->blocks[block_no].offset);
   403      addr = part->blocks[block_no].offset +
   408      if (!part->blocks[block_no].used_sectors--) {
  in reclaim_block():
   471      if (part->blocks[block].free_sectors)
   474      this_score = part->blocks[block].used_sectors;
   480      if (part->blocks[block].used_sectors ==
   485      this_score += part->blocks[block].erases;
   501      part->blocks[best_block].used_sectors,
   502      part->blocks[best_block].free_sectors);
   504      if (part->blocks[best_block].used_sectors)
  in find_free_block():
   526      if (part->blocks[block].free_sectors &&
   530      if (part->blocks[block].state == BLOCK_UNUSED)
  in find_writable_block():
   563      rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
   573      part->blocks[block].offset);
  in mark_sector_deleted():
   594      addr = part->blocks[block].offset +
   610      part->blocks[block].used_sectors--;
   612      if (!part->blocks[block].used_sectors &&
   613      !part->blocks[block].free_sectors)
  in do_writesect():
   650      !part->blocks[part->current_block].free_sectors) {
   657      block = &part->blocks[part->current_block];
  in rfd_ftl_remove_dev():
   809      part->mbd.mtd->name, i, part->blocks[i].erases);
   815      kfree(part->blocks);

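Note: the reclaim_block() hits show a victim being scored from used_sectors (live data a garbage-collect would have to copy) plus erases (wear). A hedged sketch of that kind of selection loop; field names mirror the listing, but the exact weighting in the driver differs:

#include <stdint.h>
#include <stdio.h>

struct block {
	uint32_t used_sectors;	/* live data that must be copied */
	uint32_t free_sectors;	/* still-writable sectors */
	uint32_t erases;	/* lifetime erase count */
};

/* Skip blocks that still have free space; among the rest, pick the
 * one cheapest to reclaim (least live data, fewest erases). */
static int pick_victim(const struct block *blocks, int nblocks)
{
	int best = -1;
	uint32_t best_score = UINT32_MAX;

	for (int i = 0; i < nblocks; i++) {
		uint32_t score;

		if (blocks[i].free_sectors)
			continue;
		score = blocks[i].used_sectors + blocks[i].erases;
		if (score < best_score) {
			best_score = score;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	struct block b[3] = { {10, 0, 5}, {2, 0, 3}, {4, 4, 1} };

	printf("victim: %d\n", pick_victim(b, 3));	/* prints 1 */
	return 0;
}
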
/linux-4.1.27/arch/cris/boot/rescue/

head_v32.S
    19  ;; Start clocks for used blocks.

/linux-4.1.27/net/mac80211/

michael.c
  in michael_mic():
    60      size_t block, blocks, left;
    66      blocks = data_len / 4;
    69      for (block = 0; block < blocks; block++)
    78      val |= data[blocks * 4 + left];

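Note: michael_mic() walks the message in 4-byte blocks and then folds the 0-3 leftover bytes into a final padded word. A hedged standalone sketch written from the published Michael algorithm (not copied from the kernel file; a little-endian host is assumed in place of get_unaligned_le32()):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t rol32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }
static uint32_t ror32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }

/* Michael's per-word mixing function over the two lanes l/r */
static void michael_block(uint32_t *l, uint32_t *r, uint32_t val)
{
	*l ^= val;
	*r ^= rol32(*l, 17);
	*l += *r;
	*r ^= ((*l & 0xff00ff00) >> 8) | ((*l & 0x00ff00ff) << 8);
	*l += *r;
	*r ^= rol32(*l, 3);
	*l += *r;
	*r ^= ror32(*l, 2);
	*l += *r;
}

static void michael_mic(uint32_t key_l, uint32_t key_r,
			const uint8_t *data, size_t data_len, uint8_t mic[8])
{
	uint32_t l = key_l, r = key_r, val;
	size_t blocks = data_len / 4, left = data_len % 4;

	for (size_t b = 0; b < blocks; b++) {
		memcpy(&val, data + b * 4, 4);	/* little-endian assumed */
		michael_block(&l, &r, val);
	}
	val = 0x5a;				/* padding marker byte */
	while (left > 0) {
		val <<= 8;
		left--;
		val |= data[blocks * 4 + left];
	}
	michael_block(&l, &r, val);
	michael_block(&l, &r, 0);
	memcpy(mic, &l, 4);
	memcpy(mic + 4, &r, 4);
}
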
/linux-4.1.27/drivers/mmc/core/

sdio_ops.c
   121  mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
   148      if (blocks == 0)
   151      cmd.arg |= 0x08000000 | blocks;  /* block mode */
   155      /* Code in host drivers/fwk assumes that "blocks" always is >=1 */
   156      data.blocks = blocks ? blocks : 1;
   159      left_size = data.blksz * data.blocks;

sdio_ops.h
    19  unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);

/linux-4.1.27/fs/jffs2/

jffs2_fs_sb.h
    79  /* Number of free blocks there must be before we... */
    85  /* Number of 'very dirty' blocks before we trigger immediate GC */
    91  struct jffs2_eraseblock *blocks;  /* The whole array of blocks. Used for getting blocks
    92                                     * from the offset (blocks[ofs / sector_size]) */   (member of struct jffs2_sb_info)
   107  struct list_head bad_list;       /* Bad blocks. */
   108  struct list_head bad_used_list;  /* Bad blocks with valid data in. */

build.c
  in jffs2_calc_trigger_levels():
   319      split across blocks, etc. */
   335      /* When do we allow garbage collection to eat from bad blocks rather
   351      dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
   361      dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
   365      dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
  in jffs2_do_mount_fs():
   380      c->blocks = vzalloc(size);
   383      c->blocks = kzalloc(size, GFP_KERNEL);
   384      if (!c->blocks)
   388      INIT_LIST_HEAD(&c->blocks[i].list);
   389      c->blocks[i].offset = i * c->sector_size;
   390      c->blocks[i].free_size = c->sector_size;
   427      vfree(c->blocks);
   430      kfree(c->blocks);

/linux-4.1.27/fs/reiserfs/

xattr_security.c
    54  * of blocks needed for the transaction. If successful, reiserfs_security
  in reiserfs_security_init():
    60      int blocks = 0;
    82      blocks = reiserfs_xattr_jcreate_nblocks(inode) +
    88      return blocks;

resize.c
  in reiserfs_resize():
    71      /* count bitmap blocks in new fs */
   119      * allocate additional bitmap blocks, reallocate
   167      /* free old bitmap blocks array */
   181      /* Extend old last bitmap block - new blocks have been made available */

bitmap.c
  in scan_bitmap_block():
   172      return 0;  /* No free blocks in this bitmap */
   201      * searching for free blocks for unformatted nodes)
   217      * try to set all blocks used checking are
   232      * of allocated blocks, if length of
   345  * bitmap and place new blocks there. Returns number of allocated blocks.
   360      /* No point in looking for more free blocks */   (scan_bitmap)
   490      /* preallocated blocks don't need to be run through journal_mark_freed */   (reiserfs_free_prealloc_block)
   513      "inode has negative prealloc blocks count.");   (__discard_prealloc)
   551      "no preallocated blocks.");   (reiserfs_discard_all_prealloc)
  1065      * preallocation, new blocks are displaced based on directory ID.   (determine_search_start)
  1125      /* we preallocate blocks only for regular files, specific size */   (determine_prealloc_size)
  1157      if (nr_allocated == 0)  /* no new blocks allocated, return */   (allocate_without_wrapping_disk)
  in blocknrs_and_prealloc_arrays_from_search_start():
  1201      "reiserquota: allocating %d blocks id=%u",
  1214      "reiserquota: allocating (prealloc) %d blocks id=%u",
  1242      /* Free the blocks */
  1246      "reiserquota: freeing (nospace) %d blocks id=%u",
  1252      /* Free not allocated blocks */
  1279      /* Some of preallocation blocks were not allocated */
  1282      "reiserquota: freeing (failed prealloc) %d blocks id=%u",
  1322      /* return amount still needed after using preallocated blocks */   (use_preallocated_list_if_available)
  in reiserfs_allocate_blocknrs():
  1329      /* Amount of blocks we have already reserved */
  1372      * If final allocation fails we need to return blocks back to

file.c
  in reiserfs_file_release():
    61      * freeing preallocation only involves relogging blocks that
    64      * us to log any additional blocks (including quota blocks)
    70      * is still preallocation blocks pending. Try to join the
    83      * will leak blocks on disk. Lets pin the inode

/linux-4.1.27/fs/ufs/

ufs_fs.h
    69  * For filesystem fs, the offsets of the various blocks of interest
    73  * [fs->fs_iblkno]  Inode blocks
    74  * [fs->fs_dblkno]  Data blocks
   190  * blocks which may be free. If the freelist drops below this level
   191  * only the superuser may continue to allocate blocks. This may
   192  * be set to 0 if no reserve of free blocks is deemed necessary,
   205  * This maps file system blocks to device size blocks.
   305  __fs32 cs_nbfree;  /* number of free blocks */
   311  __fs64 cs_nbfree;  /* number of free blocks */
   320  __u64 cs_nbfree;   /* number of free blocks */
   341  * it may occupy several blocks, use
   356  __fs32 fs_iblkno;   /* offset of inode-blocks in filesys */
   361  __fs32 fs_size;     /* number of blocks in fs */
   362  __fs32 fs_dsize;    /* number of data blocks in fs */
   364  __fs32 fs_bsize;    /* size of basic blocks in fs */
   365  __fs32 fs_fsize;    /* size of frag blocks in fs */
   368  __fs32 fs_minfree;  /* minimum percentage of free blocks */
   419  __fs32 fs_fpg;      /* blocks per group * fs_frag */
   453  __fs64 fs_size;     /* number of blocks in fs */
   454  __fs64 fs_dsize;    /* number of data blocks in fs */
   456  __fs64 fs_pendingblocks;  /* blocks in process of being freed */
   491  __fs32 fs_rotbloff;  /* (__u8) blocks for each rotation */
   493  __u8 fs_space[1];    /* list of blocks for each rotation */
   542  __fs16 cg_niblk;    /* number of inode blocks this cg */
   543  __fs32 cg_ndblk;    /* number of data blocks this cg */
   565  __fs32 cg_niblk;    /* number of inode blocks this cg */
   584  __fs16 cg_niblk;    /* number of inode blocks this cg */
   585  __fs32 cg_ndblk;    /* number of data blocks this cg */
   592  __fs16 cg_b[32][8]; /* positions of free blocks */
   619  __fs32 ui_db[UFS_NDADDR];  /* 0x28 data blocks */
   620  __fs32 ui_ib[UFS_NINDIR];  /* 0x58 indirect blocks */
   625  __fs32 ui_blocks;          /* 0x68 blocks in use */
   673  __fs64 ui_db[UFS_NDADDR];  /* 112: Direct disk blocks. */
   674  __fs64 ui_ib[UFS_NINDIR];  /* 208: Indirect disk blocks. */
   711  __u16 c_niblk;  /* number of inode blocks this cg */
   712  __u32 c_ndblk;  /* number of data blocks this cg */
   730  __u32 s_sblkno;  /* offset of super-blocks in filesys */
   732  __u32 s_iblkno;  /* offset of inode-blocks in filesys */
   736  __u32 s_size;    /* number of blocks (fragments) in fs */
   737  __u32 s_dsize;   /* number of data blocks in fs */
   738  __u64 s_u2_size;   /* ufs2: number of blocks (fragments) in fs */
   739  __u64 s_u2_dsize;  /* ufs2: number of data blocks in fs */
   741  __u32 s_bsize;     /* size of basic blocks */
   744  __u32 s_minfree;   /* minimum percentage of free blocks */
   775  __s32 s_rotbloff;  /* (__u8) blocks for each rotation */
   917  __fs64 fs_size;    /* number of blocks in fs */
   918  __fs64 fs_dsize;   /* number of data blocks in fs */
   920  __fs64 fs_pendingblocks;  /* blocks in process of being freed */

/linux-4.1.27/lib/zlib_inflate/

infutil.h
     1  /* infutil.h -- types and macros common to blocks and codes

/linux-4.1.27/drivers/usb/isp1760/

isp1760-hcd.h
    13  * - 32 blocks @ 256 bytes
    14  * - 20 blocks @ 1024 bytes
    15  * - 4 blocks @ 8192 bytes

/linux-4.1.27/fs/gfs2/

bmap.h
    21  * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
    24  * @data_blocks: returns the number of data blocks required
    25  * @ind_blocks: returns the number of indirect blocks required

trans.h
    29  /* reserve either the number of blocks to be allocated plus the rg header
    30   * block, or all of the blocks in the rg, whichever is smaller */   (gfs2_rg_blocks)
    38  extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,

trans.c
    31  gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, ...)
    38      BUG_ON(blocks == 0 && revokes == 0);
    48      tr->tr_blocks = blocks;
    52      if (blocks)
    53      tr->tr_reserved += 6 + blocks;
    80  pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n",   (gfs2_print_trans)
   155  * blocks, which isn't an enormous overhead but twice as much as
   156  * for normal metadata blocks.

trace_gfs2.h
   376  /* Reserving/releasing blocks in the log */
   379  TP_PROTO(const struct gfs2_sbd *sdp, int blocks),
   381  TP_ARGS(sdp, blocks),
   385  __field( int, blocks )
   390  __entry->blocks = blocks;
   394  MINOR(__entry->dev), __entry->blocks)
   429  * Correctness: Test of disard generation vs. blocks allocated
   432  /* Map an extent of blocks, possibly a new allocation */
   472  /* Keep track of blocks as they are allocated/freed */

bmap.c
   348  * gfs2_extent_length - Returns length of an extent of blocks
   356  * unallocated blocks in the extent, otherwise it will return the number
   357  * of contiguous blocks in the extent.
   432  * @maxlen: The max number of data blocks to alloc
   435  * i) Indirect blocks to grow the metadata tree height
   436  * ii) Indirect blocks to fill in lower part of the metadata tree
   437  * iii) Data blocks
   440  * number of blocks which we need. The second part does the actual
   442  * blocks are available, there will only be one request per bmap call)
   443  * and uses the state machine to initialise the blocks in order.
   484  /* Need to allocate indirect blocks */   (gfs2_bmap_alloc)
   557  /* Tree complete, adding data blocks */   (gfs2_bmap_alloc)
   594  * @create: True if its ok to alloc blocks to satify the request
   598  * mapped. Sets buffer_new() if new blocks were allocated.
   674  /* At this point ret is the tree depth of already allocated blocks */   (gfs2_block_map)
  1353  * @blocks: Size of extent in fs blocks
  1358  gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
  1364      if ((jext->dblock + jext->blocks) == dblock) {
  1365      jext->blocks += blocks;
  1375      jext->blocks = blocks;
  1387  * blocks to all physical blocks for the given journal. This will save
  1388  * us time when writing journal blocks. Most journals will have only one
  1389  * extent that maps all their logical blocks. That's because gfs2.mkfs
  1390  * arranges the journal blocks sequentially to maximize performance.

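Note: gfs2_add_jextent() at line 1358 shows classic extent coalescing: a new logical-to-physical mapping extends the previous extent when it continues it on disk, and starts a new one otherwise. A hedged sketch with the kernel's list handling simplified to a growable array:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct jextent {
	uint64_t lblock;	/* first logical block */
	uint64_t dblock;	/* first disk block */
	uint64_t blocks;	/* extent length */
};

struct jmap {
	struct jextent *ext;
	size_t n, cap;
};

static int add_jextent(struct jmap *m, uint64_t lblock,
		       uint64_t dblock, uint64_t blocks)
{
	if (m->n) {
		struct jextent *last = &m->ext[m->n - 1];

		/* physically contiguous with the previous extent? */
		if (last->dblock + last->blocks == dblock) {
			last->blocks += blocks;
			return 0;
		}
	}
	if (m->n == m->cap) {
		m->cap = m->cap ? m->cap * 2 : 4;
		m->ext = realloc(m->ext, m->cap * sizeof(*m->ext));
		if (!m->ext)
			return -1;
	}
	m->ext[m->n++] = (struct jextent){ lblock, dblock, blocks };
	return 0;
}

int main(void)
{
	struct jmap m = { 0 };

	add_jextent(&m, 0, 100, 8);
	add_jextent(&m, 8, 108, 8);	/* merges into 100..115 */
	add_jextent(&m, 16, 300, 4);	/* discontiguous: new extent */
	printf("%zu extents, first is %llu blocks\n",
	       m.n, (unsigned long long)m.ext[0].blocks);
	return 0;
}

A journal laid out sequentially by mkfs collapses to a single extent this way, which is the performance point the comment at lines 1387-1390 makes.
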
log.c
    42  * Compute the number of log descriptor blocks needed to hold a certain number
    45  * Returns: the number of blocks needed (minimum is always 1)
   304  * gfs2_log_release - Release a given number of log blocks
   306  * @blks: The number of blocks
   323  * @blks: The number of blocks to reserve
   325  * Note that we never give out the last few blocks of the journal. Thats
   326  * due to the fact that there is a small number of header blocks
   328  * flush time, so we ensure that we have just enough free blocks at all
   393  * log_distance - Compute distance between two journal blocks
   399  * blocks in the journal
   401  * Returns: the distance in blocks
   417  * calc_reserved - Calculate the number of blocks to reserve when
   436  * Also, we need to reserve blocks for revoke journal entries and one for an
   439  * Returns: the number of blocks reserved
   452  /* Account for header blocks */   (calc_reserved)
   620  /* If no blocks have been reserved, we need to also   (gfs2_write_revokes)
   845  * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
   846  * or the total number of used blocks (pinned blocks plus AIL blocks)
   901  * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks

rgrp.h
    17  * bitmap, one 64-bit word in the bitmap will represent 32 blocks.
    18  * By reserving 32 blocks at a time, we can optimize / shortcut how we search

H A D | rgrp.c | 54 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks. 157 * This allows searching of a whole u64 at once (32 blocks) with a 179 * @len: number of blocks in the new reservation 311 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned 313 * @n_unaligned: Number of unaligned blocks to check 339 * gfs2_free_extlen - Return extent length of free blocks 343 * Starting at the block specified by the rbm, see how many free blocks 344 * there are, not reading more than len blocks ahead. This can be done 345 * using memchr_inv when the blocks are byte aligned, but has to be done 346 * on a block by block basis in case of unaligned blocks. Also this 350 * Returns: Number of free blocks in the extent 454 /* Count # blocks in each of 4 possible allocation states */ gfs2_rgrp_verify() 650 /* return reserved blocks to the rgrp */ __rs_deltree() 654 it will only do so if the freed blocks are somehow __rs_deltree() 655 contiguous with a span of free blocks that follows. Still, __rs_deltree() 699 * return_all_reservations - return all reserved blocks back to the rgrp. 702 * We previously reserved a bunch of blocks for allocation. Now we need to 768 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */ compute_bitstructs() 806 /* other blocks */ compute_bitstructs() 1140 * Read in all of a Resource Group's header and bitmap blocks. 1472 rgd->rd_reserved += rs->rs_free; /* blocks reserved */ rs_insert() 1478 * rg_mblk_search - find a group of multiple free blocks to form a reservation 1480 * @ip: pointer to the inode for which we're reserving blocks 1577 * @ip: The inode for which we are searching for blocks 1633 * gfs2_rbm_find - Look for blocks of a particular state 1644 * - If looking for free blocks, we set GBF_FULL on each bitmap which 1645 * has no free blocks in it. 1646 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which 1949 * We try our best to find an rgrp that has at least ap->target blocks 1952 * atleast ap->min_target blocks available. Either way, we set ap->allowed to 1953 * the number of blocks available in the chosen rgrp. 2065 /* If we've scanned all the rgrps, but found no free blocks gfs2_inplace_reserve() 2155 * @bstart: the start of a run of blocks to free 2235 * @ip: The inode we have just allocated blocks for 2236 * @rbm: The start of the allocated blocks 2265 reserve more blocks next time. */ gfs2_adjust_reservation() 2304 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode 2307 * @nblocks: requested number of blocks/extent length (value/result) 2333 /* Since all blocks are reserved in advance, this shouldn't happen */ gfs2_alloc_blocks() 2399 * @ip: the inode these blocks are being freed from 2400 * @bstart: first block of a run of contiguous blocks 2402 * @meta: 1 if the blocks represent metadata 2428 * @ip: the inode these blocks are being freed from 2429 * @bstart: first block of a run of contiguous blocks
|
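gfs2_free_extlen (hits 339-350 above) scans forward from a starting block counting free blocks, fast-pathing byte-aligned spans. A sketch over the 2-bit layout shown earlier; the kernel uses memchr_inv for the aligned middle part, and a plain 0x00-byte test stands in for it here:

    #include <stdint.h>

    /* Length of the run of free (state 0) blocks at `start`, capped at `len`. */
    static uint64_t free_extlen(const uint8_t *map, uint64_t start, uint64_t len)
    {
        uint64_t n = 0;
        while (n < len) {
            uint64_t b = start + n;
            if (b % 4 == 0 && len - n >= 4 && map[b / 4] == 0) {
                n += 4;                  /* a 0x00 byte means 4 free blocks */
                continue;
            }
            if (((map[b / 4] >> ((b % 4) * 2)) & 3) != 0)
                break;                   /* first non-free block ends the run */
            n++;
        }
        return n;
    }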
H A D | quota.c | 802 unsigned int nalloc = 0, blocks; do_sync() local 844 * two blocks need to be updated instead of 1 */ do_sync() 845 blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3; do_sync() 854 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS; do_sync() 856 error = gfs2_trans_begin(sdp, blocks, 0); do_sync() 1098 * gfs2_quota_check - check if allocating new blocks will exceed quota 1103 * blocks. ap->min_target, if set, contains the minimum blks 1109 * ap->allowed is set to the number of blocks allowed 1112 * of blocks available. 1260 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift; gfs2_quota_init() local 1272 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; gfs2_quota_init() 1283 for (x = 0; x < blocks; x++) { gfs2_quota_init() 1520 state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks; gfs2_quota_get_state() 1575 unsigned int blocks = 0; gfs2_set_dqblk() local 1606 /* Check for existing entry, if none then alloc new blocks */ gfs2_set_dqblk() 1635 blocks = 1 + data_blocks + ind_blocks; gfs2_set_dqblk() 1636 ap.target = blocks; gfs2_set_dqblk() 1640 blocks += gfs2_rg_blocks(ip, blocks); gfs2_set_dqblk() 1643 /* Some quotas span block boundaries and can update two blocks, gfs2_set_dqblk() 1645 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0); gfs2_set_dqblk()
|
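The do_sync() hit at 845 above shows the credit estimate GFS2 builds before gfs2_trans_begin(): per-quota data blocks plus fixed overhead, with resource-group and indirect blocks added only when an allocation is needed. A sketch of the same arithmetic; the RES_* values here are placeholders, not the real gfs2 constants:

    /* Assumed placeholder values; the real RES_* constants live in gfs2. */
    enum { RES_DINODE = 1, RES_STATFS = 1 };

    static unsigned int quota_sync_credits(unsigned int num_qd,
                                           unsigned int data_blocks,
                                           unsigned int nalloc,
                                           unsigned int ind_blocks,
                                           unsigned int rg_blocks)
    {
        /* mirrors: blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3 */
        unsigned int blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
        if (nalloc)   /* allocating: also journal the rgrp and indirects */
            blocks += rg_blocks + nalloc * ind_blocks + RES_STATFS;
        return blocks;
    }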
/linux-4.1.27/fs/ext2/ |
H A D | inode.c | 138 * data blocks at leaves and indirect blocks in intermediate nodes. 145 * we need to know is the capacity of indirect blocks (taken from the 153 * if our filesystem had 8Kb blocks. We might use long long, but that would 202 * ext2_get_branch - read the chain of indirect blocks leading to data 205 * @offsets: offsets of pointers in inode/indirect blocks 227 * or when it reads all @depth-1 indirect blocks successfully and finds 347 * of direct blocks need to be allocated for the given branch. 349 * @branch: chain of indirect blocks 350 * @k: number of blocks needed for indirect blocks 351 * @blks: number of data blocks to be mapped. 354 * return the total number of blocks to be allocated, including the 355 * direct and indirect blocks. 365 * then it's clear blocks on that path have not been allocated ext2_blks_to_allocate() 385 * ext2_alloc_blocks: allocate the multiple blocks needed for a branch 386 * @indirect_blks: the number of blocks needed to allocate for indirect 387 * blocks 390 * the indirect blocks (if needed) and the first direct block, 392 * direct blocks 405 * Here we try to allocate the requested multiple blocks at once, ext2_alloc_blocks() 407 * To build a branch, we should allocate blocks for ext2_alloc_blocks() 408 * the indirect blocks (if not allocated yet), and at least ext2_alloc_blocks() 410 * minimum number of blocks we need to allocate (required) ext2_alloc_blocks() 416 /* allocating blocks for indirect blocks and direct blocks */ ext2_alloc_blocks() 422 /* allocate blocks for indirect blocks */ ext2_alloc_blocks() 435 /* total number of blocks allocated for direct blocks */ ext2_alloc_blocks() 448 * ext2_alloc_branch - allocate and set up a chain of blocks. 450 * @num: depth of the chain (number of blocks to allocate) 451 * @offsets: offsets (in the blocks) to store the pointers to next. 454 * This function allocates @num blocks, zeroes out all but the last one, 466 * If allocation fails we free all blocks we've allocated (and forget 491 * metadata blocks and data blocks are allocated. ext2_alloc_branch() 515 * data block numbers ext2_alloc_branch() 547 * @num: number of indirect blocks we are adding 548 * @blks: number of direct blocks we are adding 570 * direct blocks ext2_splice_branch() 602 * to tree, set linkage between the newborn blocks, write them if sync is 605 * removals - all blocks on the path are immune now) and possibly force the 608 * allocations is needed - we simply release blocks and do not touch anything 613 * return > 0, # of blocks mapped or allocated. 647 /* map more blocks */ ext2_get_blocks() 714 /* the number of blocks needed to allocate for [d,t]indirect blocks */ ext2_get_blocks() 718 * direct blocks to allocate for this branch. ext2_get_blocks() 920 * ext2_find_shared - find the indirect blocks for partial truncation. 924 * @chain: place to store the pointers to partial indirect blocks 930 * blocks but leave the blocks themselves alive. Block is partially 941 * truncated blocks - in @chain[].bh and pointers to their last elements 1003 * ext2_free_data - free a list of data blocks 1008 * We are freeing all blocks referred from that array (numbers are 1021 /* accumulate blocks to free if they're contiguous */ ext2_free_data() 1048 * We are freeing all blocks referred from these branches (numbers are 1127 /* Clear the ends of indirect blocks on the shared branch */ __ext2_truncate_blocks() 1174 * IS_APPEND inode to have blocks-past-i_size trimmed off. ext2_truncate_blocks()
|
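The inode.c hits at 138-153 above describe the classic direct/indirect tree. The mapping from a file block number to a path of offsets is worth seeing concretely; a user-space sketch of the ext2-style decomposition, assuming 12 direct slots and `ptrs` block pointers per indirect block:

    /* Decompose file block i_block into offsets through the inode's
     * i_block[] array and the chain of indirect blocks.  Returns the
     * depth of the chain (real ext2 returns 0 for "block too big"). */
    static int block_to_path(long i_block, long ptrs, int offsets[4])
    {
        const int direct = 12;              /* EXT2_NDIR_BLOCKS */
        int n = 0;

        if (i_block < direct) {
            offsets[n++] = (int)i_block;
        } else if ((i_block -= direct) < ptrs) {
            offsets[n++] = direct;          /* EXT2_IND_BLOCK */
            offsets[n++] = (int)i_block;
        } else if ((i_block -= ptrs) < ptrs * ptrs) {
            offsets[n++] = direct + 1;      /* EXT2_DIND_BLOCK */
            offsets[n++] = (int)(i_block / ptrs);
            offsets[n++] = (int)(i_block % ptrs);
        } else {
            i_block -= ptrs * ptrs;
            offsets[n++] = direct + 2;      /* EXT2_TIND_BLOCK */
            offsets[n++] = (int)(i_block / (ptrs * ptrs));
            offsets[n++] = (int)((i_block / ptrs) % ptrs);
            offsets[n++] = (int)(i_block % ptrs);
        }
        return n;
    }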
H A D | balloc.c | 22 * balloc.c contains the block allocation and deallocation routines 26 * The free blocks are managed by bitmaps. A file system contains several 27 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap 28 * block for inodes, N blocks for the inode table and data blocks. 32 * the free blocks count in the block. The descriptors are loaded in memory 471 * ext2_free_blocks() -- Free given blocks and update quota and i_blocks 474 * @count: number of blocks to free 495 "Freeing blocks not in datazone - " ext2_free_blocks() 509 * Check to see if we are freeing blocks across a group ext2_free_blocks() 532 "Freeing blocks in system zones - " ext2_free_blocks() 611 * block within the next XX blocks. find_next_usable_block() 647 * @count: target number of blocks to allocate 650 * Attempt to allocate blocks within a given range. Set the range of allocation 652 * and at last, allocate the blocks by claiming the found free bit as allocated. 846 * disk bitmap later and then, if there are free blocks then we adjust find_next_reservable_window() 1030 * required number of free blocks 1032 * Since ext2_try_to_allocate() will always allocate blocks within 1034 * multiple blocks allocation has to stop at the end of the reservation 1036 * blocks needed and the current size of the window, we try to 1038 * basis before ext2_new_blocks() tries to allocate blocks. 1071 * @count: target number of blocks to allocate 1086 * reservation), and there are lots of free blocks, but they are all 1200 * @count: target number of blocks to allocate 1204 * free, or there is a free block within 32 blocks of the goal, that block 1222 ext2_grpblk_t free_blocks; /* number of free blocks in a group */ ext2_new_blocks() 1285 * if there are not enough free blocks to make a new reservation ext2_new_blocks() 1324 * are no free blocks ext2_new_blocks() 1330 * free blocks is less than half of the reservation ext2_new_blocks() 1351 * there may indeed be free blocks available on disk ext2_new_blocks() 1380 "blocks from "E2FSBLK", length %lu", ext2_new_blocks() 1383 * ext2_try_to_allocate marked the blocks we allocated as in ext2_new_blocks() 1384 * use. So we may want to selectively mark some of the blocks ext2_new_blocks() 1394 "block("E2FSBLK") >= blocks count(%d) - " ext2_new_blocks() 1508 * ext2_bg_has_super - number of blocks used by the superblock in group 1512 * Return the number of blocks used by the superblock (primary or backup) 1524 * ext2_bg_num_gdb - number of blocks used by the group table in group 1528 * Return the number of blocks used by the group descriptor table 1530 * different number of descriptor blocks in each group.
|
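ext2_new_blocks() (hits 1200-1204 above) tries the goal block first and then a free bit nearby. A simplified in-group sketch; the real code additionally searches for a whole free byte and limits the near-goal search to 32 blocks:

    #include <stdint.h>

    /* Claim a free bit in one block group's bitmap, starting at `goal`.
     * Returns the bit index claimed, or -1 if the group is full. */
    static long alloc_in_group(uint8_t *bitmap, long nbits, long goal)
    {
        long i = (goal >= 0 && goal < nbits) ? goal : 0;
        for (long n = 0; n < nbits; n++, i = (i + 1) % nbits) {
            if (!(bitmap[i / 8] & (1u << (i % 8)))) {
                bitmap[i / 8] |= (uint8_t)(1u << (i % 8));  /* claim it */
                return i;
            }
        }
        return -1;
    }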
H A D | ext2.h | 24 /* data type for filesystem-wide block numbers */ 72 unsigned long s_blocks_per_group;/* Number of blocks in a group */ 74 unsigned long s_itb_per_group; /* Number of inode table blocks per group */ 75 unsigned long s_gdb_count; /* Number of group descriptor blocks */ 128 * Define EXT2_RESERVATION to reserve data blocks for expanding files 131 /* max window size: 1024 (direct blocks) + 3 ([t,d]indirect blocks) */ 156 #define EXT2_BAD_INO 1 /* Bad blocks inode */ 192 * Structure of a block group descriptor 199 __le16 bg_free_blocks_count; /* Free blocks count */ 215 * Constants relative to the data blocks 320 __le32 i_block[EXT2_N_BLOCKS];/* Pointers to blocks */ 418 __le32 s_r_blocks_count; /* Reserved blocks count */ 419 __le32 s_free_blocks_count; /* Free blocks count */ 439 __le16 s_def_resuid; /* Default uid for reserved blocks */ 440 __le16 s_def_resgid; /* Default gid for reserved blocks */ 468 __u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate */ 560 * Default values for user and/or group using reserved blocks 667 * place a file's data blocks near its inode block, and new inodes
|
H A D | xattr.h | 12 /* Magic value in attribute blocks */ 29 __le32 h_blocks; /* number of disk blocks used */
|
/linux-4.1.27/drivers/mfd/ |
H A D | stmpe.c | 26 static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks) __stmpe_enable() argument 28 return stmpe->variant->enable(stmpe, blocks, true); __stmpe_enable() 31 static int __stmpe_disable(struct stmpe *stmpe, unsigned int blocks) __stmpe_disable() argument 33 return stmpe->variant->enable(stmpe, blocks, false); __stmpe_disable() 107 * stmpe_enable - enable blocks on an STMPE device 109 * @blocks: Mask of blocks (enum stmpe_block values) to enable 111 int stmpe_enable(struct stmpe *stmpe, unsigned int blocks) stmpe_enable() argument 116 ret = __stmpe_enable(stmpe, blocks); stmpe_enable() 124 * stmpe_disable - disable blocks on an STMPE device 126 * @blocks: Mask of blocks (enum stmpe_block values) to disable 128 int stmpe_disable(struct stmpe *stmpe, unsigned int blocks) stmpe_disable() argument 133 ret = __stmpe_disable(stmpe, blocks); stmpe_disable() 366 static int stmpe801_enable(struct stmpe *stmpe, unsigned int blocks, stmpe801_enable() argument 369 if (blocks & STMPE_BLOCK_GPIO) stmpe801_enable() 381 .blocks = stmpe801_blocks, 393 .blocks = stmpe801_blocks_noirq, 454 static int stmpe811_enable(struct stmpe *stmpe, unsigned int blocks, stmpe811_enable() argument 459 if (blocks & STMPE_BLOCK_GPIO) stmpe811_enable() 462 if (blocks & STMPE_BLOCK_ADC) stmpe811_enable() 465 if (blocks & STMPE_BLOCK_TOUCHSCREEN) stmpe811_enable() 485 .blocks = stmpe811_blocks, 500 .blocks = stmpe811_blocks, 603 static int stmpe1601_enable(struct stmpe *stmpe, unsigned int blocks, stmpe1601_enable() argument 608 if (blocks & STMPE_BLOCK_GPIO) stmpe1601_enable() 613 if (blocks & STMPE_BLOCK_KEYPAD) stmpe1601_enable() 618 if (blocks & STMPE_BLOCK_PWM) stmpe1601_enable() 649 .blocks = stmpe1601_blocks, 689 static int stmpe1801_enable(struct stmpe *stmpe, unsigned int blocks, stmpe1801_enable() argument 693 if (blocks & STMPE_BLOCK_GPIO) stmpe1801_enable() 696 if (blocks & STMPE_BLOCK_KEYPAD) stmpe1801_enable() 732 .blocks = stmpe1801_blocks, 776 static int stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks, stmpe24xx_enable() argument 781 if (blocks & STMPE_BLOCK_GPIO) stmpe24xx_enable() 784 if (blocks & STMPE_BLOCK_KEYPAD) stmpe24xx_enable() 813 .blocks = stmpe24xx_blocks, 827 .blocks = stmpe24xx_blocks, 1087 unsigned int platform_blocks = stmpe->pdata->blocks; stmpe_devices_init() 1092 struct stmpe_variant_block *block = &variant->blocks[i]; stmpe_devices_init() 1114 "platform wants blocks (%#x) not present on variant", stmpe_devices_init() 1143 pdata->blocks |= STMPE_BLOCK_GPIO; for_each_child_of_node() 1145 pdata->blocks |= STMPE_BLOCK_KEYPAD; for_each_child_of_node() 1147 pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN; for_each_child_of_node() 1149 pdata->blocks |= STMPE_BLOCK_ADC; for_each_child_of_node() 1151 pdata->blocks |= STMPE_BLOCK_PWM; for_each_child_of_node() 1153 pdata->blocks |= STMPE_BLOCK_ROTATOR; for_each_child_of_node()
|
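The variant enable() callbacks above all follow one pattern: translate a mask of stmpe_block values into device register bits. A sketch of that dispatch; the mask names mirror the driver, but the register bit values here are invented for illustration:

    #include <stdint.h>

    /* Illustrative block mask, modeled on enum stmpe_block. */
    enum { BLOCK_GPIO = 1 << 0, BLOCK_ADC = 1 << 1, BLOCK_TOUCHSCREEN = 1 << 2 };

    /* Turn a block mask into a (made-up) clock-enable register value. */
    static uint8_t enable_mask_to_bits(unsigned int blocks)
    {
        uint8_t bits = 0;
        if (blocks & BLOCK_GPIO)
            bits |= 0x04;
        if (blocks & BLOCK_ADC)
            bits |= 0x02;
        if (blocks & BLOCK_TOUCHSCREEN)
            bits |= 0x03;   /* touchscreen also needs the ADC clock (assumed) */
        return bits;
    }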
H A D | stmpe.h | 54 * @blocks: list of blocks present on this device 55 * @num_blocks: number of blocks present on this device 57 * @enable: callback to enable the specified blocks. 70 struct stmpe_variant_block *blocks; member in struct:stmpe_variant_info 73 int (*enable)(struct stmpe *stmpe, unsigned int blocks, bool enable);
|
/linux-4.1.27/fs/ext3/ |
H A D | resize.c | 41 printk(KERN_DEBUG "EXT3-fs: adding %s group %u: %u blocks " verify_group_input() 55 ext3_warning(sb, __func__, "Reserved blocks too high (%u)", verify_group_input() 58 ext3_warning(sb, __func__, "Bad blocks count %u", verify_group_input() 75 "Inode table not in group (blocks %u-"E3FSBLK")", verify_group_input() 184 * changing blocks outside the actual filesystem. We still do journaling to 225 /* Copy all of the GDT blocks into the backup in this group */ setup_new_group_blocks() 258 /* Zero out all of the reserved backup group descriptor table blocks */ setup_new_group_blocks() 288 /* Zero out all of the inode table blocks */ setup_new_group_blocks() 382 * Check that all of the backup GDT blocks are held in the primary GDT block. 418 * The new backup GDT blocks will be stored as leaf blocks in this indirect 420 * we check to ensure that the resize inode has actually reserved these blocks. 422 * Don't need to update the block bitmaps because the blocks are still in use. 511 * the new GDT block for use (which also "frees" the backup GDT blocks add_new_gdb() 513 * these blocks, because they are marked as in-use from being in the add_new_gdb() 514 * reserved inode, and will become GDT blocks (primary and backup). add_new_gdb() 568 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 569 * We need to add these reserved backup GDT blocks to the resize inode, so 573 * The indirect blocks are actually the primary reserved GDT blocks, 576 * GDT blocks so we don't overwrite a data block by accident. The reserved 577 * backup GDT blocks are stored in their reserved primary GDT block. 649 * Finally we can add each of the reserved backup GDT blocks from reserve_backup_gdb() 688 * blocks are not otherwise touched by the filesystem code when it is 776 * Otherwise, we may need to add backup GDT blocks for a sparse group. 823 "No reserved GDT blocks, can't resize"); ext3_group_add() 845 * modify each of the reserved GDT dindirect blocks. ext3_group_add() 867 * We will only either add reserved group blocks to a backup group ext3_group_add() 868 * or remove reserved blocks for the first group in a new group block. ext3_group_add() 896 * group; then we update the total disk blocks count; then we ext3_group_add() 899 * using the new disk blocks. ext3_group_add() 912 * Make the new blocks and inodes valid next. We do this before ext3_group_add() 914 * all of its blocks and inodes are already valid. ext3_group_add() 918 * blocks/inodes before the group is live won't actually let us ext3_group_add() 982 /* Extend the filesystem to the new number of blocks specified. This entry 985 * for emergencies (because it has no dependencies on reserved blocks). 989 * GDT blocks are reserved to grow to the desired size. 1009 " up to "E3FSBLK" blocks\n", ext3_group_extend() 1017 " too large to resize to "E3FSBLK" blocks safely\n", ext3_group_extend() 1031 /* Handle the remaining blocks in the last group only. */ ext3_group_extend() 1054 " blocks, %u new)", ext3_group_extend() 1103 ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n", ext3_group_extend() 1106 ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n", ext3_group_extend() 1111 printk(KERN_DEBUG "EXT3-fs: extended group to %u blocks\n", ext3_group_extend()
|
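The resize paths above keep asking which groups carry superblock and GDT backups. With the sparse_super feature, that is group 0 plus groups that are powers of 3, 5 and 7; a self-contained sketch of that test, the same rule ext3_bg_has_super implements:

    /* Is `group` a power of `base`?  (1 counts, since base^0 == 1.) */
    static int test_root(unsigned int group, unsigned int base)
    {
        while (group > 1) {
            if (group % base)
                return 0;
            group /= base;
        }
        return 1;
    }

    static int bg_has_super(unsigned int group, int sparse_super)
    {
        if (!sparse_super)
            return 1;       /* every group carries a backup */
        return group == 0 || test_root(group, 3) ||
               test_root(group, 5) || test_root(group, 7);
    }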
H A D | balloc.c | 19 * balloc.c contains the block allocation and deallocation routines 23 * The free blocks are managed by bitmaps. A file system contains several 24 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap 25 * block for inodes, N blocks for the inode table and data blocks. 29 * the free blocks count in the block. The descriptors are loaded in memory 483 * ext3_free_blocks_sb() -- Free given blocks and update quota 487 * @count: number of blocks to free 513 "Freeing blocks not in datazone - " ext3_free_blocks_sb() 527 * Check to see if we are freeing blocks across a group ext3_free_blocks_sb() 549 "Freeing blocks in system zones - " ext3_free_blocks_sb() 556 * We are about to start releasing blocks in the bitmap, ext3_free_blocks_sb() 674 * ext3_free_blocks() -- Free given blocks and update quota 678 * @count: number of blocks to free 698 * For ext3 allocations, we must not reuse any blocks which are 785 * block within the next XX blocks. find_next_usable_block() 858 * @count: target number of blocks to allocate 861 * Attempt to allocate blocks within a given range. Set the range of allocation 863 * and at last, allocate the blocks by claiming the found free bit as allocated. 1060 * disk bitmap later and then, if there are free blocks then we adjust find_next_reservable_window() 1250 * required number of free blocks 1252 * Since ext3_try_to_allocate() will always allocate blocks within 1254 * multiple blocks allocation has to stop at the end of the reservation 1256 * blocks needed and the current size of the window, we try to 1258 * basis before ext3_new_blocks() tries to allocate blocks, 1293 * @count: target number of blocks to allocate 1308 * reservation), and there are lots of free blocks, but they are all 1477 * @count: target number of blocks to allocate 1499 ext3_grpblk_t free_blocks; /* number of free blocks in a group */ ext3_new_blocks() 1563 * if there are not enough free blocks to make a new reservation ext3_new_blocks() 1603 * are no free blocks ext3_new_blocks() 1609 * free blocks is less than half of the reservation ext3_new_blocks() 1633 * there may indeed be free blocks available on disk ext3_new_blocks() 1667 "blocks from "E3FSBLK", length %lu", ext3_new_blocks() 1670 * claim_block() marked the blocks we allocated as in use. So we ext3_new_blocks() 1671 * may want to selectively mark some of the blocks as free. ext3_new_blocks() 1710 "block("E3FSBLK") >= blocks count(%d) - " ext3_new_blocks() 1772 * ext3_count_free_blocks() -- count filesystem free blocks 1775 * Adds up the number of free blocks from each block group. 1850 * ext3_bg_has_super - number of blocks used by the superblock in group 1854 * Return the number of blocks used by the superblock (primary or backup) 1883 * ext3_bg_num_gdb - number of blocks used by the group table in group 1887 * Return the number of blocks used by the group descriptor table 1889 * different number of descriptor blocks in each group. 1915 * blocks. When the free block is found, it tries to allocate this block and 1982 /* We did not claim any blocks */ ext3_trim_all_free() 2062 ext3_debug("trimmed %d blocks in the group %d\n", ext3_trim_all_free()
|
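find_next_reservable_window() (hit 1060 above) walks the per-filesystem set of reservation windows looking for a gap big enough to place a new window. A sketch of the search over a sorted singly linked list; the kernel keeps the windows in a red-black tree instead:

    #include <stddef.h>

    struct rsv_window { long start, end; struct rsv_window *next; };

    /* First position >= goal where a `size`-block gap exists between
     * existing windows (sorted by start); -1 if no gap fits. */
    static long find_gap(const struct rsv_window *head, long goal,
                         long size, long fs_end)
    {
        long cursor = goal;
        for (const struct rsv_window *w = head; w; w = w->next) {
            if (w->end < cursor)
                continue;                 /* window entirely before cursor */
            if (w->start - cursor >= size)
                return cursor;            /* gap before this window fits */
            cursor = w->end + 1;          /* skip past the window */
        }
        return (fs_end - cursor >= size) ? cursor : -1;
    }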
H A D | inode.c | 51 * which has been journaled. Metadata (e.g. indirect blocks) must be 76 * data blocks. */ ext3_forget() 100 * Work out how many blocks we need to proceed with the next chunk of a 175 * At this moment, get_block can be called only for blocks inside truncate_restart_transaction() 321 * data blocks at leaves and indirect blocks in intermediate nodes. 328 * we need to know is the capacity of indirect blocks (taken from the 336 * if our filesystem had 8Kb blocks. We might use long long, but that would 382 * ext3_get_branch - read the chain of indirect blocks leading to data 385 * @offsets: offsets of pointers in inode/indirect blocks 407 * or when it reads all @depth-1 indirect blocks successfully and finds 525 * of direct blocks need to be allocated for the given branch. 527 * @branch: chain of indirect blocks 528 * @k: number of blocks needed for indirect blocks 529 * @blks: number of data blocks to be mapped. 532 * return the total number of blocks to be allocated, including the 533 * direct and indirect blocks. 542 * then it's clear blocks on that path have not been allocated ext3_blks_to_allocate() 562 * ext3_alloc_blocks - allocate the multiple blocks needed for a branch 566 * @indirect_blks: the number of blocks needed to allocate for indirect 567 * blocks 568 * @blks: number of blocks needed to allocate for direct blocks 570 * the indirect blocks (if needed) and the first direct block, 573 * return the number of direct blocks allocated 586 * Here we try to allocate the requested multiple blocks at once, ext3_alloc_blocks() 588 * To build a branch, we should allocate blocks for ext3_alloc_blocks() 589 * the indirect blocks (if not allocated yet), and at least ext3_alloc_blocks() 591 * minimum number of blocks we need to allocate (required) ext3_alloc_blocks() 597 /* allocating blocks for indirect blocks and direct blocks */ ext3_alloc_blocks() 603 /* allocate blocks for indirect blocks */ ext3_alloc_blocks() 616 /* total number of blocks allocated for direct blocks */ ext3_alloc_blocks() 627 * ext3_alloc_branch - allocate and set up a chain of blocks. 630 * @indirect_blks: number of allocated indirect blocks 631 * @blks: number of allocated direct blocks 633 * @offsets: offsets (in the blocks) to store the pointers to next. 636 * This function allocates blocks, zeroes out all but the last one, 648 * If allocation fails we free all blocks we've allocated (and forget 672 * metadata blocks and data blocks are allocated. ext3_alloc_branch() 704 * data block numbers ext3_alloc_branch() 740 * @num: number of indirect blocks we are adding 741 * @blks: number of direct blocks we are adding 775 * direct blocks ext3_splice_branch() 841 * to tree, set linkage between the newborn blocks, write them if sync is 844 * removals - all blocks on the path are immune now) and possibly force the 847 * allocations is needed - we simply release blocks and do not touch anything 853 * return > 0, # of blocks mapped or allocated. 889 /* map more blocks */ ext3_get_blocks_handle() 962 /* the number of blocks needed to allocate for [d,t]indirect blocks */ ext3_get_blocks_handle() 967 * direct blocks to allocate for this branch. ext3_get_blocks_handle() 1010 /* Maximum number of blocks we map for direct IO at once. */ 1015 * For B blocks with A block pointers per block we need: 1076 * ext3_get_blocks_handle() returns number of blocks ext3_getblk() 1231 * Truncate blocks that were not used by write. We have to truncate the 1241 * Truncate blocks that were not used by direct IO write. We have to zero out 1262 * we allocate blocks but write fails for some reason */ ext3_write_begin() 1295 * block_write_begin may have instantiated a few blocks ext3_write_begin() 1349 * This is nasty and subtle: ext3_write_begin() could have allocated blocks 1393 * There may be allocated blocks outside of i_size because ext3_ordered_write_end() 1422 * There may be allocated blocks outside of i_size because ext3_writeback_write_end() 1467 * There may be allocated blocks outside of i_size because ext3_journalled_write_end() 1504 * take extra steps to flush any blocks which might be in the cache. 1562 * Note that whenever we need to map blocks we start a transaction even if 1862 * blocks outside i_size. Trim these off again. ext3_direct_IO() 1881 * but cannot extend i_size. Truncate allocated blocks ext3_direct_IO() 2109 * ext3_find_shared - find the indirect blocks for partial truncation. 2113 * @chain: place to store the pointers to partial indirect blocks 2119 * indirect blocks but leave the blocks themselves alive. Block is 2131 * partially truncated blocks - in @chain[].bh and pointers to 2196 * We release `count' blocks on disk, but (last - first) may be greater 2242 * ext3_free_data - free a list of data blocks 2249 * We are freeing all blocks referred from that array (numbers are stored as 2252 * We accumulate contiguous runs of blocks to free. Conveniently, if these 2253 * blocks are contiguous then releasing them at one time will only affect one 2254 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 2265 unsigned long count = 0; /* Number of blocks in the run */ ext3_free_data() 2278 * to the blocks, we can't free them. */ ext3_free_data() 2286 /* accumulate blocks to free if they're contiguous */ ext3_free_data() 2337 * We are freeing all blocks referred from these branches (numbers are 2395 * rather than leaking blocks. ext3_free_branches() 2421 * crash does not overwrite non-journaled data blocks ext3_free_branches() 2449 BUFFER_TRACE(parent_bh, "free data blocks"); ext3_free_branches() 2488 * ext3_truncate() to have another go. So there will be instantiated blocks 2552 if (n == 1) { /* direct blocks */ ext3_truncate() 2577 /* Clear the ends of indirect blocks on the shared branch */ ext3_truncate() 3233 * dirty pages and freeing of disk blocks, we can guarantee that any 3234 * commit will leave the blocks being flushed in an unused state on 3235 * disk. (On recovery, the inode will get truncated and the blocks will 3237 * leave these blocks visible to the user.) 3341 * How many blocks doth make a writepage()? 3343 * With N blocks per page, it may be: 3344 * N data blocks 3348 * N+5 bitmap blocks (from the above) 3349 * N+5 group descriptor summary blocks 3356 * With ordered or writeback data it's the same, less the N data blocks. 3358 * If the inode's direct blocks can hold an integral number of pages then a 3359 * page cannot straddle two indirect blocks, and we can only touch one indirect 3380 * we will be updating only the data blocks + inodes */ ext3_writepage_trans_blocks() 3463 * Also, dquot_alloc_space() will always dirty the inode when blocks
|
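The "How many blocks doth make a writepage()?" comment (hits 3341-3356 above) enumerates the worst case. Summing exactly the items it lists, for N blocks per page, gives the sketch below; this is the comment's accounting, not the exact kernel formula, which also folds in the journalling mode:

    /* Worst-case distinct blocks touched by one writepage of N blocks,
     * per the ext3 inode.c comment's list. */
    static int writepage_worst_case(int n)
    {
        int data = n;
        int indirects = 1 + 2 + 1;        /* indirect + dindirect + tindirect */
        int bitmaps = n + 5;              /* one per block touched above */
        int group_descs = n + 5;
        int misc = 1 /* inode */ + 1 /* superblock */ + 2 /* quota (assumed) */;
        return data + indirects + bitmaps + group_descs + misc;
    }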
H A D | ext3.h | 38 * Define EXT3_RESERVATION to reserve data blocks for expanding files 41 /* max window size: 1024 (direct blocks) + 3 ([t,d]indirect blocks) */ 62 #define EXT3_BAD_INO 1 /* Bad blocks inode */ 100 * Structure of a block group descriptor 107 __le16 bg_free_blocks_count; /* Free blocks count */ 123 * Constants relative to the data blocks 189 __u32 blocks_count; /* Total number of blocks in this group */ 190 __u16 reserved_blocks; /* Number of reserved blocks in this group */ 283 __le32 i_block[EXT3_N_BLOCKS];/* Pointers to blocks */ 409 __le32 s_r_blocks_count; /* Reserved blocks count */ 410 __le32 s_free_blocks_count; /* Free blocks count */ 430 /*50*/ __le16 s_def_resuid; /* Default uid for reserved blocks */ 431 __le16 s_def_resgid; /* Default gid for reserved blocks */ 459 __u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate */ 479 __le32 s_r_blocks_count_hi; /* Reserved blocks count */ 480 __le32 s_free_blocks_count_hi; /* Free blocks count */ 487 __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ 497 /* data type for filesystem-wide block numbers */ 556 * place a file's data blocks near its inode block, and new inodes 585 * The intent is that i_disksize always represents the blocks which 604 * truncate, the inode and all the metadata blocks *must* be in a 633 unsigned long s_blocks_per_group;/* Number of blocks in a group */ 635 unsigned long s_itb_per_group; /* Number of inode table blocks per group */ 636 unsigned long s_gdb_count; /* Number of group descriptor blocks */ 803 * Default values for user and/or group using reserved blocks 1127 /* Define the number of blocks we need to account to a transaction to 1131 * indirection blocks, the group and superblock summaries, and the data 1178 /* Number of blocks needed for quota update - we know that the structure was 1181 /* Number of blocks needed for quota insert/delete - we do some block writes
|
H A D | fsync.c | 22 * we can depend on generic_block_fdatasync() to sync the data blocks. 75 * (they were dirtied by commit). But that's OK - the blocks are ext3_sync_file()
|
H A D | xattr.h | 11 /* Magic value in attribute blocks */ 28 __le32 h_blocks; /* number of disk blocks used */
|
/linux-4.1.27/drivers/gpu/drm/radeon/ |
H A D | radeon_mem.c | 138 struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); init_heap() local 140 if (!blocks) init_heap() 145 kfree(blocks); init_heap() 149 blocks->start = start; init_heap() 150 blocks->size = size; init_heap() 151 blocks->file_priv = NULL; init_heap() 152 blocks->next = blocks->prev = *heap; init_heap() 155 (*heap)->next = (*heap)->prev = blocks; init_heap() 159 /* Free all blocks associated with the releasing file. */
|
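init_heap() above seeds a heap with one block spanning the whole range; allocation then works by splitting a free block. A simplified first-fit split; the driver uses a circular doubly linked list and drm-specific types, here reduced to a singly linked list:

    #include <stdlib.h>

    struct mem_block { int start, size; void *owner; struct mem_block *next; };

    /* Carve `size` units out of free block b for `owner`; any remainder
     * becomes a new free block linked after it. */
    static struct mem_block *split_block(struct mem_block *b, int size,
                                         void *owner)
    {
        if (b->owner || b->size < size)
            return NULL;
        if (b->size > size) {
            struct mem_block *rest = malloc(sizeof(*rest));
            if (!rest)
                return NULL;
            rest->start = b->start + size;
            rest->size = b->size - size;
            rest->owner = NULL;
            rest->next = b->next;
            b->next = rest;
            b->size = size;
        }
        b->owner = owner;
        return b;
    }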
H A D | evergreen_reg.h | 59 /* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */ 166 /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ 196 /* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */ 231 /* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ 251 /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
|
/linux-4.1.27/fs/ext4/ |
H A D | readpage.c | 22 * - encountering a page with non-contiguous blocks 93 * If a page does not map to a contiguous run of blocks then it simply falls 148 sector_t blocks[MAX_BUF_PER_PAGE]; ext4_mpage_readpages() local 184 * Map blocks using the previous result first. ext4_mpage_readpages() 200 blocks[page_block] = map.m_pblk + map_offset + ext4_mpage_readpages() 236 /* Contiguous blocks? */ ext4_mpage_readpages() 237 if (page_block && blocks[page_block-1] != map.m_pblk-1) ext4_mpage_readpages() 246 blocks[page_block] = map.m_pblk+relative_block; ext4_mpage_readpages() 272 if (bio && (last_block_in_bio != blocks[0] - 1)) { ext4_mpage_readpages() 294 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); ext4_mpage_readpages() 309 last_block_in_bio = blocks[blocks_per_page - 1]; ext4_mpage_readpages()
|
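ext4_mpage_readpages() (hits 236-246 above) only batches a page into the shared bio when its blocks are physically consecutive, and only extends an in-flight bio when the page's first block continues where the bio ended. Both tests reduce to the sketch below:

    #include <stdint.h>

    /* A page may ride the mpage fast path only if its blocks are consecutive. */
    static int page_is_contiguous(const uint64_t *blocks, int blocks_per_page)
    {
        for (int i = 1; i < blocks_per_page; i++)
            if (blocks[i] != blocks[i - 1] + 1)
                return 0;
        return 1;
    }

    /* An in-flight bio can absorb this page only if it continues the run. */
    static int bio_continues(uint64_t last_block_in_bio, uint64_t first_block)
    {
        return last_block_in_bio == first_block - 1;
    }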
H A D | indirect.c | 51 * data blocks at leaves and indirect blocks in intermediate nodes. 58 * we need to know is the capacity of indirect blocks (taken from the 66 * if our filesystem had 8Kb blocks. We might use long long, but that would 113 * ext4_get_branch - read the chain of indirect blocks leading to data 116 * @offsets: offsets of pointers in inode/indirect blocks 136 * or when it reads all @depth-1 indirect blocks successfully and finds 258 * of direct blocks need to be allocated for the given branch. 260 * @branch: chain of indirect blocks 261 * @k: number of blocks needed for indirect blocks 262 * @blks: number of data blocks to be mapped. 265 * return the total number of blocks to be allocated, including the 266 * direct and indirect blocks. 275 * then it's clear blocks on that path have not been allocated ext4_blks_to_allocate() 295 * ext4_alloc_branch - allocate and set up a chain of blocks. 298 * @indirect_blks: number of allocated indirect blocks 299 * @blks: number of allocated direct blocks 301 * @offsets: offsets (in the blocks) to store the pointers to next. 304 * This function allocates blocks, zeroes out all but the last one, 316 * If allocation fails we free all blocks we've allocated (and forget 383 * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and ext4_alloc_branch() 401 * @chain: chain of indirect blocks (with a missing link - see 404 * @num: number of indirect blocks we are adding 405 * @blks: number of direct blocks we are adding 436 * direct blocks ext4_splice_branch() 492 * to tree, set linkage between the newborn blocks, write them if sync is 495 * removals - all blocks on the path are immune now) and possibly force the 498 * allocations is needed - we simply release blocks and do not touch anything 503 * return > 0, # of blocks mapped or allocated. 509 * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or 511 * blocks. 543 /* map more blocks */ ext4_ind_map_blocks() 566 EXT4_ERROR_INODE(inode, "Can't allocate blocks for " ext4_ind_map_blocks() 582 /* the number of blocks needed to allocate for [d,t]indirect blocks */ ext4_ind_map_blocks() 587 * direct blocks to allocate for this branch. ext4_ind_map_blocks() 762 * Calculate the number of metadata blocks needed to reserve 788 * Calculate number of indirect blocks touched by mapping @nrblocks logically 789 * contiguous blocks 794 * With N contiguous data blocks, we need at most ext4_ind_trans_blocks() 795 * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, ext4_ind_trans_blocks() 796 * 2 dindirect blocks, and 1 tindirect block ext4_ind_trans_blocks() 838 * ext4_find_shared - find the indirect blocks for partial truncation. 842 * @chain: place to store the pointers to partial indirect blocks 848 * indirect blocks but leave the blocks themselves alive. Block is 860 * partially truncated blocks - in @chain[].bh and pointers to 926 * We release `count' blocks on disk, but (last - first) may be greater 950 "blocks %llu len %lu", ext4_clear_blocks() 988 * ext4_free_data - free a list of data blocks 995 * We are freeing all blocks referred from that array (numbers are stored as 998 * We accumulate contiguous runs of blocks to free. Conveniently, if these 999 * blocks are contiguous then releasing them at one time will only affect one 1000 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 1011 unsigned long count = 0; /* Number of blocks in the run */ ext4_free_data() 1024 * to the blocks, we can't free them. */ ext4_free_data() 1032 /* accumulate blocks to free if they're contiguous */ ext4_free_data() 1087 * We are freeing all blocks referred from these branches (numbers are 1154 * rather than leaking blocks. ext4_free_branches() 1172 * transaction where the data blocks are ext4_free_branches() 1198 BUFFER_TRACE(parent_bh, "free data blocks"); ext4_free_branches() 1240 * It is unnecessary to free any data blocks if last_block is ext4_ind_truncate() 1244 } else if (n == 1) { /* direct blocks */ ext4_ind_truncate() 1270 /* Clear the ends of indirect blocks on the shared branch */ ext4_ind_truncate() 1312 * Free the blocks in the defined range (end is exclusive endpoint of 1382 * Clear the ends of indirect blocks on the shared branch ext4_ind_remove_space() 1418 * Clear the ends of indirect blocks on the shared branch ext4_ind_remove_space()
|
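The ext4_ind_trans_blocks() comment (hits 788-796 above) states its bound directly: N contiguous data blocks touch at most N/addr_per_block + 1 indirect blocks, 2 double-indirect blocks and 1 triple-indirect block. As a function:

    /* Worst-case indirect blocks touched when mapping nrblocks contiguous
     * data blocks, per the comment above. */
    static int ind_trans_blocks(int nrblocks, int addr_per_block)
    {
        return nrblocks / addr_per_block + 1   /* indirect */
               + 2                             /* double indirect */
               + 1;                            /* triple indirect */
    }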
H A D | block_validity.c | 7 * Track which blocks in the filesystem are metadata blocks that 8 * should never be used as data blocks by files or directories. 52 * Mark a range of blocks as belonging to the "system zone" --- that 53 * is, filesystem metadata blocks which should never be used by 194 * overlaps with filesystem metadata blocks.
|
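block_validity.c keeps the "system zone" ranges so ext4 can refuse extents that claim metadata blocks. The check itself is interval overlap; a sketch over a plain array, whereas the kernel keeps the zones in an rbtree keyed by start block:

    #include <stdint.h>

    struct zone { uint64_t start, count; };

    /* Valid only if [start, start+count) overlaps no metadata zone. */
    static int extent_is_valid(const struct zone *z, int nz,
                               uint64_t start, uint64_t count)
    {
        for (int i = 0; i < nz; i++)
            if (start < z[i].start + z[i].count && z[i].start < start + count)
                return 0;   /* overlaps filesystem metadata */
        return 1;
    }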
H A D | resize.c | 112 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " verify_group_input() 122 ext4_warning(sb, "Reserved blocks too high (%u)", verify_group_input() 125 ext4_warning(sb, "Bad blocks count %u", verify_group_input() 138 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", verify_group_input() 241 * Returns 0 on a successful allocation of the metadata blocks in the 278 /* We collect as many contiguous blocks as possible. */ ext4_alloc_group_tables() 344 "blocks (%d free)\n", ext4_alloc_group_tables() 400 * set_flexbg_block_bitmap() marks @count blocks starting from @block used. 414 ext4_debug("mark blocks [%llu/%u] used\n", block, count); set_flexbg_block_bitmap() 462 * changing blocks outside the actual filesystem. We still do journaling to 468 * In this step, we only set bits in blocks bitmaps for blocks taken by 471 * bitmap for blocks taken by group tables. 520 /* Copy all of the GDT blocks into the backup in this group */ setup_new_flex_group_blocks() 554 * table blocks setup_new_flex_group_blocks() 568 /* Zero out all of the inode table blocks */ setup_new_flex_group_blocks() 570 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n", setup_new_flex_group_blocks() 706 * Check that all of the backup GDT blocks are held in the primary GDT block. 744 * The new backup GDT blocks will be stored as leaf blocks in this indirect 746 * we check to ensure that the resize inode has actually reserved these blocks. 748 * Don't need to update the block bitmaps because the blocks are still in use. 832 * the new GDT block for use (which also "frees" the backup GDT blocks add_new_gdb() 834 * these blocks, because they are marked as in-use from being in the add_new_gdb() 835 * reserved inode, and will become GDT blocks (primary and backup). add_new_gdb() 922 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 923 * We need to add these reserved backup GDT blocks to the resize inode, so 927 * The indirect blocks are actually the primary reserved GDT blocks, 930 * GDT blocks so we don't overwrite a data block by accident. The reserved 931 * backup GDT blocks are stored in their reserved primary GDT block. 998 * Finally we can add each of the reserved backup GDT blocks from reserve_backup_gdb() 1037 * blocks are not otherwise touched by the filesystem code when it is 1170 * We will only either add reserved group blocks to a backup group ext4_add_new_descs() 1171 * or remove reserved blocks for the first group in a new group block. ext4_add_new_descs() 1319 * Make the new blocks and inodes valid next. We do this before ext4_update_super() 1321 * all of its blocks and inodes are already valid. ext4_update_super() 1325 * blocks/inodes before the group is live won't actually let us ext4_update_super() 1345 ext4_debug("free blocks count %llu", ext4_free_blocks_count(es)); ext4_update_super() 1382 ext4_debug("free blocks count %llu", ext4_update_super() 1402 "%llu blocks(%llu free %llu reserved)\n", flex_gd->count, ext4_update_super() 1435 * blocks. If we are adding a group past the last current GDT block, ext4_flex_group_add() 1438 * modify each of the reserved GDT dindirect blocks. ext4_flex_group_add() 1441 /* GDT blocks */ ext4_flex_group_add() 1443 credit += reserved_gdb; /* Reserved GDT dindirect blocks */ ext4_flex_group_add() 1567 * Otherwise, we may need to add backup GDT blocks for a sparse group. 1611 "No reserved GDT blocks, can't resize"); ext4_group_add() 1672 ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, ext4_group_extend_no_check() 1674 /* We add the blocks to the bitmap and set the group need init bit */ ext4_group_extend_no_check() 1679 ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, ext4_group_extend_no_check() 1689 "blocks\n", ext4_blocks_count(es)); ext4_group_extend_no_check() 1697 * Extend the filesystem to the new number of blocks specified. This entry 1700 * for emergencies (because it has no dependencies on reserved blocks). 1704 * GDT blocks are reserved to grow to the desired size. 1720 "extending last group from %llu to %llu blocks", ext4_group_extend() 1728 "filesystem too large to resize to %llu blocks safely", ext4_group_extend() 1740 /* Handle the remaining blocks in the last group only. */ ext4_group_extend() 1759 ext4_warning(sb, "will only finish group (%llu blocks, %u new)", ext4_group_extend() 1782 * are no more reserved gdt blocks, and then convert the file system 1867 * @n_blocks_count: the number of blocks residing in the resized fs 1899 "to %llu blocks", o_blocks_count, n_blocks_count); ext4_resize_fs() 1998 "resized to %llu blocks", ext4_resize_fs()
|
H A D | truncate.h | 8 * Truncate blocks that were not used by write. We have to truncate the 20 * Work out how many blocks we need to proceed with the next chunk of a
|
H A D | mballoc.h | 55 #define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */ 88 * default group prealloc size 512 blocks 126 ext4_grpblk_t pa_free; /* how many blocks are free */ 149 * (512). We store prealloc space into the hash based on the pa_free blocks 185 __u8 ac_2order; /* if request is to allocate 2^N blocks and
|
H A D | balloc.c | 28 * balloc.c contains the block allocation and deallocation routines 97 * descriptor blocks */ ext4_num_overhead_clusters() 108 * Normally all of these blocks are contiguous, so the special ext4_num_overhead_clusters() 161 unsigned int blocks; num_clusters_in_group() local 168 * blocks. num_clusters_in_group() 170 blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) - num_clusters_in_group() 173 blocks = EXT4_BLOCKS_PER_GROUP(sb); num_clusters_in_group() 174 return EXT4_NUM_B2C(EXT4_SB(sb), blocks); num_clusters_in_group() 191 /* If the checksum is bad, mark all blocks used to prevent allocation ext4_init_block_bitmap() 236 * Also if the number of blocks within the group is less than ext4_init_block_bitmap() 247 /* Return the number of free blocks in a block group. It is used when 259 * The free blocks are managed by bitmaps. A file system contains several 260 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap 261 * block for inodes, N blocks for the inode table and data blocks. 265 * the free blocks count in the block. The descriptors are loaded in memory 327 * blocks may not be in the group at all ext4_valid_block_bitmap() 521 * @nclusters: number of needed blocks 566 /* No free blocks. Let's see if we can dip into reserved pool */ ext4_has_free_clusters() 610 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks 629 /* Fill with neighbour allocated blocks */ ext4_new_meta_blocks() 639 * Account for the allocated meta blocks. We will never ext4_new_meta_blocks() 730 * ext4_bg_has_super - number of blocks used by the superblock in group 734 * Return the number of blocks used by the superblock (primary or backup) 786 * ext4_bg_num_gdb - number of blocks used by the group table in group 790 * Return the number of blocks used by the group descriptor table 792 * different number of descriptor blocks in each group. 810 * the beginning of a block group, including the reserved gdt blocks. 837 * Return the ideal location to start allocating blocks for a
|
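num_clusters_in_group() (hits 161-174 above) converts a possibly short last group from blocks to clusters with EXT4_NUM_B2C, which is a round-up shift. In isolation:

    #include <stdint.h>

    /* Round blocks up to clusters of 2^cluster_bits blocks each,
     * the EXT4_NUM_B2C operation. */
    static uint64_t blocks_to_clusters(uint64_t blocks, unsigned cluster_bits)
    {
        return (blocks + (1ULL << cluster_bits) - 1) >> cluster_bits;
    }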
H A D | ext4_extents.h | 25 * With AGGRESSIVE_TEST defined, the capacity of index/leaf blocks 33 * With EXTENTS_STATS defined, the number of blocks and extents 56 * For non-inode extent blocks, ext4_extent_tail 77 __le16 ee_len; /* number of blocks covered by extent */ 87 __le32 ei_block; /* index covers logical blocks from 'block' */ 101 __le16 eh_depth; /* does the tree have real underlying blocks? */ 138 * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an 143 * EXT_UNWRITTEN_MAX_LEN is the maximum number of blocks we can have in an 151 * Hence, the maximum number of blocks we can have in an *initialized*
|
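The EXT_INIT_MAX_LEN/EXT_UNWRITTEN_MAX_LEN comments (hits 138-151 above) encode the unwritten flag in bit 15 of ee_len, which is why an initialized extent can span 32768 blocks but an unwritten one only 32767. The decode logic:

    #include <stdint.h>

    #define EXT_INIT_MAX_LEN (1u << 15)   /* 32768 */

    /* ee_len values above EXT_INIT_MAX_LEN carry the unwritten flag. */
    static unsigned actual_len(uint16_t ee_len)
    {
        return ee_len <= EXT_INIT_MAX_LEN ? ee_len
                                          : ee_len - EXT_INIT_MAX_LEN;
    }

    static int is_unwritten(uint16_t ee_len)
    {
        return ee_len > EXT_INIT_MAX_LEN;
    }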
H A D | mballoc.c | 54 * - tree of groups sorted by number of free blocks 59 * The allocation request involves a request for multiple blocks 68 * s_mb_stream_request is 16 blocks. This can also be tuned via 70 * terms of number of blocks. 88 * we have contiguous physical blocks representing the file blocks 94 * If we are not able to find blocks in the inode prealloc space and if we 106 * If we can't allocate blocks via inode prealloc or/and locality group 121 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / 122 * blocksize) blocks. So it can have information regarding groups_per_page 128 * We look for count number of blocks in the buddy cache. If we were able 129 * to locate that many free blocks we return with additional information 132 * Before allocating blocks via buddy cache we normalize the request 133 * blocks. This ensures we ask for more blocks than we need. The extra 134 * blocks that we get after allocation are added to the respective prealloc 140 * 512 blocks. This can be tuned via 142 * terms of number of blocks. If we have mounted the file system with -O 154 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The 163 * best extent in the found extents. Searching for the blocks starts with 185 * physical blocks. Any block from that preallocation can be used 186 * independently. The descriptor just tracks the number of blocks left 190 * must discard all preallocated blocks. 200 * this means the blocks mballoc considers used are: 201 * - allocated blocks (persistent) 202 * - preallocated blocks (non-persistent) 209 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA. 237 * blocks for PA are allocated in the buddy, buddy must be referenced 263 * nothing wrong here -- they're different PAs covering different blocks 285 * find blocks 307 * mark free blocks in-core 658 * Divide the blocks starting from @first with length @len into 659 * smaller chunks with power of 2 blocks. 660 * Clear the bits in bitmap which the blocks of the chunk(s) covered, 678 /* find how many blocks can be covered since this position */ ext4_mb_mark_free_simple() 681 /* find how many blocks of power 2 we need to mark */ ext4_mb_mark_free_simple() 808 * So for each group we take up 2 blocks. A page can 809 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. 988 * and buddy information in consecutive blocks. ext4_mb_get_buddy_page_lock() 989 * So for each group we need two blocks. ext4_mb_get_buddy_page_lock() 1139 * and buddy information in consecutive blocks. ext4_mb_load_buddy() 1140 * So for each group we need two blocks. ext4_mb_load_buddy() 1361 * corresponding blocks were allocated. Bits in range mb_buddy_mark_free() 1637 * allocated blocks for history */ ext4_mb_use_best_found() 1648 * double allocate blocks. The reference is dropped ext4_mb_use_best_found() 1855 * number of blocks to an existing extent */ ext4_mb_find_by_goal() 1915 * free blocks in the group, so the routine can know upper limit. 1938 * free blocks even though group info says we ext4_mb_complex_scan_group() 1939 * have free blocks ext4_mb_complex_scan_group() 1953 "group info. But got %d blocks", ext4_mb_complex_scan_group() 1956 * The number of free blocks differs. This mostly ext4_mb_complex_scan_group() 2091 /* non-extent files are limited to low blocks/groups */ ext4_mb_regular_allocator() 2134 /* Let's just scan groups to find more-or-less suitable blocks */ ext4_mb_regular_allocator() 2722 "mballoc: %u blocks %u reqs (%u success)", ext4_mb_release() 2764 * so we know we can free the blocks that were released with that commit. 2775 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", ext4_free_data_callback() 2796 /* there are blocks to put in buddy to make them really free */ ext4_free_data_callback() 2808 * is supported and the free blocks will be trimmed online. ext4_free_data_callback() 2824 mb_debug(1, "freed %u blocks in %u structures\n", count, count2); ext4_free_data_callback() 2914 ext4_error(sb, "Allocating blocks %llu-%llu which overlap " ext4_mb_mark_diskspace_used() 2918 * We leak some of the blocks here. ext4_mb_mark_diskspace_used() 2959 /* release all the reserved blocks if non delalloc */ ext4_mb_mark_diskspace_used() 2996 mb_debug(1, "#%u: goal %u blocks for locality group\n", ext4_mb_normalize_group_request() 3022 /* sometimes the caller may want exact blocks */ ext4_mb_normalize_request() 3090 /* don't cover already allocated blocks in selected range */ ext4_mb_normalize_request() 3100 /* check we don't cross already preallocated blocks */ ext4_mb_normalize_request() 3167 /* XXX: is it better to align blocks WRT to logical ext4_mb_normalize_request() 3188 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, ext4_mb_normalize_request() 3216 * Called on failure; free up any blocks from the inode PA for this 3252 * use blocks preallocated to inode 3262 /* found preallocated blocks, use them */ ext4_mb_use_inode_pa() 3282 * use blocks preallocated to locality group 3298 * instead we correct pa later, after blocks are marked ext4_mb_use_group_pa() 3335 * search goal blocks in preallocated space 3362 /* non-extent files can't have physical blocks past 2^32 */ ext4_mb_use_preallocated() 3368 /* found preallocated blocks, use them */ ext4_mb_use_preallocated() 3468 * otherwise we could leave used blocks available for ext4_mb_generate_from_pa() 3614 * allocated blocks for history */ ext4_mb_new_inode_pa() 3674 * allocated blocks for history */ ext4_mb_new_group_pa() 3725 * finds all unused blocks in on-disk bitmap, frees them in 3877 /* if we still need more blocks and some PAs were used, try again */ ext4_mb_discard_group_preallocations() 3916 * releases all non-used preallocated blocks for given inode 4190 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " ext4_mb_initialize_context() 4390 * Main entry point into mballoc to allocate blocks 4417 * there is enough free blocks to do block allocation ext4_mb_new_blocks() 4527 /* release all the reserved blocks if non delalloc */ ext4_mb_new_blocks() 4538 * We can merge two free data extents only if the physical blocks 4540 * AND the blocks are associated with the same group. 4577 * blocks */ ext4_mb_free_metadata() 4630 * ext4_free_blocks() -- Free given blocks and update quota 4634 * @count: number of blocks to free 4665 ext4_error(sb, "Freeing blocks not in datazone - " ext4_free_blocks() 4705 * blocks at the beginning or the end unless we are explicitly ext4_free_blocks() 4742 * Check to see if we are freeing blocks across a group ext4_free_blocks() 4769 ext4_error(sb, "Freeing blocks in system zone - " ext4_free_blocks() 4805 * blocks being freed are metadata. These blocks shouldn't ext4_free_blocks() 4882 * ext4_group_add_blocks() -- Add given blocks to an existing group 4886 * @count: number of blocks to free 4888 * This marks the blocks as free in the bitmap and buddy. 4911 * Check to see if we are freeing blocks across a group ext4_group_add_blocks() 4915 ext4_warning(sb, "too much blocks added to group %u\n", ext4_group_add_blocks() 4938 ext4_error(sb, "Adding blocks in system zones - " ext4_group_add_blocks() 5019 * @count: number of blocks to TRIM 5023 * Trim "count" blocks starting at "start" in the "group". To assure that no 5024 * one will allocate those blocks, mark them as used in the buddy bitmap. This must 5044 * Mark blocks used, so no one can reuse them while __releases() 5141 ext4_debug("trimmed %d blocks in the group %d\n", ext4_trim_all_free()
|
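mballoc's fast path (ac_2order in mballoc.h above, and the power-of-two requirement at hit 154) searches buddy bitmaps by order. A sketch of the two basic buddy operations: finding the order for a request, and finding a chunk's buddy at a given order:

    /* Smallest order with 2^order >= len: the fast-path search order. */
    static int request_order(unsigned int len)
    {
        int order = 0;
        while ((1u << order) < len)
            order++;
        return order;
    }

    /* At any order, a chunk's buddy is its index with the low bit flipped;
     * when both halves of a pair are free they merge at order + 1. */
    static unsigned int buddy_of(unsigned int chunk_index)
    {
        return chunk_index ^ 1;
    }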
H A D | ext4_jbd2.h | 24 /* Define the number of blocks we need to account to a transaction to 28 * indirection blocks, the group and superblock summaries, and the data 57 * Define the number of metadata blocks we need to account to modify data. 59 * This includes the super block, inode block, quota blocks and xattr blocks 84 /* Number of blocks needed for quota update - we know that the structure was 89 /* Number of blocks needed for quota insert/delete - we do some block writes 265 int type, int blocks, int rsv_blocks); 305 #define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \ 306 __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks)) 310 int blocks, int rsv_blocks) __ext4_journal_start() 312 return __ext4_journal_start_sb(inode->i_sb, line, type, blocks, __ext4_journal_start() 308 __ext4_journal_start(struct inode *inode, unsigned int line, int type, int blocks, int rsv_blocks) __ext4_journal_start() argument
|
H A D | ext4_jbd2.c | 63 int type, int blocks, int rsv_blocks) __ext4_journal_start_sb() 68 trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_); __ext4_journal_start_sb() 76 return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS, __ext4_journal_start_sb() 174 * which has been journaled. Metadata (e.g. indirect blocks) must be 209 * data blocks. */ __ext4_forget() 62 __ext4_journal_start_sb(struct super_block *sb, unsigned int line, int type, int blocks, int rsv_blocks) __ext4_journal_start_sb() argument
|
H A D | inode.c | 164 * moment, get_block can be called only for blocks inside i_size since ext4_truncate_restart_trans() 335 "with only %d reserved data blocks", ext4_da_update_reserve_space() 348 /* Update quota subsystem for data blocks */ ext4_da_update_reserve_space() 355 * not re-claim the quota for fallocated blocks. ext4_da_update_reserve_space() 436 * The ext4_map_blocks() function tries to look up the requested blocks, 437 * and returns if the blocks are already mapped. 439 * Otherwise it takes the write lock of the i_data_sem and allocates blocks 440 * and stores the allocated blocks in the result buffer head and mark it 447 * On success, it returns the number of blocks being mapped or allocated. 448 * if create==0 and the blocks are pre-allocated and unwritten block, 452 * It returns 0 if plain look up failed (blocks have not been allocated), in 558 * Returns if the blocks have already been allocated ext4_map_blocks() 560 * Note that if blocks have been preallocated ext4_map_blocks() 580 * New block allocation and/or writing to an unwritten extent ext4_map_blocks() 598 * We allocated new blocks which will result in ext4_map_blocks() 606 * Update reserved blocks/metadata blocks after successful ext4_map_blocks() 687 /* Maximum number of blocks we map for direct IO at once. */ 1015 * we allocate blocks but write fails for some reason ext4_write_begin() 1084 * __block_write_begin may have instantiated a few blocks ext4_write_begin() 1187 /* if we have allocated more blocks and copied ext4_write_end() 1188 * less. We will have blocks allocated outside ext4_write_end() 1261 /* if we have allocated more blocks and copied ext4_journalled_write_end() 1262 * less. We will have blocks allocated outside ext4_journalled_write_end() 1304 * recalculate the amount of metadata blocks to reserve ext4_da_reserve_space() 1340 * if there aren't enough reserved blocks, then the ext4_da_release_space() 1347 "data blocks", inode->i_ino, to_free, ext4_da_release_space() 1354 /* update fs dirty data blocks counter */ ext4_da_release_space() 1406 /* If we have released all the blocks belonging to a cluster, then we ext4_da_page_release_reservation() 1490 ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", ext4_print_free_blocks() 1514 * time. This function looks up the requested blocks and sets the 1671 * preallocated blocks are unmapped but should be treated ext4_da_get_block_prep() 1672 * the same as allocated blocks. ext4_da_get_block_prep() 1808 * multiple blocks we need to write those buffer_heads that are mapped. This 1919 * mballoc gives us at most this number of blocks... 1926 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 1928 * @mpd - extent of blocks 1932 * The function is used to collect contig. blocks in the same state. If the 1977 * @mpd - extent of blocks for mapping 1997 ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) mpage_process_page_bufs() local 2003 if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) { mpage_process_page_bufs() 2017 return lblk < blocks; mpage_process_page_bufs() 2127 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or mpage_map_one_extent() 2129 * where we have written into one or more preallocated blocks). It is mpage_map_one_extent() 2130 * possible that we're going to need more metadata blocks than mpage_map_one_extent() 2133 * in data loss. So use reserved blocks to allocate metadata if mpage_map_one_extent() 2137 * the blocks in question are delalloc blocks. This indicates mpage_map_one_extent() 2138 * that the blocks and quotas have already been checked when mpage_map_one_extent() 2183 * delayed, blocks are allocated, if it is unwritten, we may need to convert 2186 * can return fewer blocks or the range is covered by more unwritten extents. We 2214 * is non-zero, a commit should free up blocks. mpage_map_and_submit_extent() 2225 " max blocks %u with error %d", mpage_map_and_submit_extent() 2279 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping 2281 * bpp - 1 blocks in bpp different extents. 2302 * mpd->lblk with length mpd->len blocks). 2571 * free blocks released in the transaction ext4_writepages() 2622 * Start pushing delalloc when 1/2 of free blocks are dirty. ext4_nonda_switch() 2630 * free block count is less than 150% of dirty blocks ext4_nonda_switch() 2631 * or free blocks is less than watermark ext4_nonda_switch() 2730 * block_write_begin may have instantiated a few blocks ext4_da_write_begin() 2833 * Drop reserved blocks ext4_da_invalidatepage() 2848 * Force all delayed allocation blocks to be allocated for a given inode. 2859 * also start triggering a write of the data blocks, which is ext4_alloc_da_blocks() 2885 * which will map the blocks, and start the I/O, but not ext4_alloc_da_blocks() 2903 * take extra steps to flush any blocks which might be in the cache. 2922 * blocks for file ext4_bmap() 3042 * We allocate an uninitialized extent if blocks haven't been allocated. 3088 * For holes, we fallocate those blocks, mark them as unwritten 3089 * If those blocks were preallocated, we make sure they are split, but 3140 * Allocated blocks to fill the hole are marked as ext4_ext_direct_IO() 3577 * ext4_punch_hole: punches a hole in a file by releasing the blocks 3682 /* If there are no blocks to remove, return now */ ext4_punch_hole() 3767 * ext4_truncate() to have another go. So there will be instantiated blocks 3966 * blocks from the inode table. __ext4_get_inode_loc() 4430 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1). ext4_update_other_inodes_time() 4709 * dirty pages and freeing of disk blocks, we can guarantee that any 4710 * commit will leave the blocks being flushed in an unused state on 4711 * disk. (On recovery, the inode will get truncated and the blocks will 4713 * leave these blocks visible to the user.) 4881 * have data blocks allocated (it may have an external xattr block). ext4_getattr() 4886 stat->blocks += (stat->size + 511) >> 9; ext4_getattr() 4892 * on-disk file blocks. ext4_getattr() 4895 * will return the blocks that include the delayed allocation ext4_getattr() 4896 * blocks for this file. ext4_getattr() 4900 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9); ext4_getattr() 4913 * Account for index blocks, block groups bitmaps and block group 4914 * descriptor blocks if modifying data blocks and index blocks; 4915 * worst case, the index blocks spread over different block groups 4921 * Also account for superblock, inode, quota and xattr blocks 4932 * How many index blocks need to be touched to map @lblocks logical blocks ext4_meta_trans_blocks() 4950 /* bitmaps and block group descriptor blocks */ ext4_meta_trans_blocks() 4953 /* Blocks for super block, inode, quota and xattr blocks */ ext4_meta_trans_blocks() 4976 /* Account for data blocks for journalled mode */ ext4_writepage_trans_blocks() 4986 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. 4988 * journal buffers for data blocks are not included here, as DIO 5139 * Also, dquot_alloc_block() will always dirty the inode when blocks 5218 /* We have to allocate physical blocks for delalloc blocks ext4_change_inode_journal_flag() 5219 * before flushing the journal. Otherwise delalloc blocks can not ext4_change_inode_journal_flag() 5220 * be allocated any more. Even worse, truncate on delalloc blocks ext4_change_inode_journal_flag() 5221 * could trigger a BUG by flushing delalloc blocks in the journal. ext4_change_inode_journal_flag()
|
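The ext4_map_blocks() comments above describe a return convention common to get_block-style mappers: a positive return is the number of contiguous blocks mapped, 0 means the lookup found a hole (nothing allocated), and a negative value is an error. A minimal userspace sketch of that convention over a toy extent list; map_blocks() here is a hypothetical stand-in, not the kernel function:

#include <stdio.h>

/* Toy extent: logical start, physical start, length in blocks. */
struct extent { unsigned lblk, pblk, len; };

/*
 * Sketch of the ext4_map_blocks() return convention: look up @lblk in an
 * extent list and return how many contiguous blocks are mapped starting
 * there (0 means a hole, i.e. nothing allocated).
 */
static int map_blocks(const struct extent *ex, int nr, unsigned lblk,
                      unsigned *pblk)
{
    for (int i = 0; i < nr; i++) {
        if (lblk >= ex[i].lblk && lblk < ex[i].lblk + ex[i].len) {
            *pblk = ex[i].pblk + (lblk - ex[i].lblk);
            return ex[i].len - (lblk - ex[i].lblk);
        }
    }
    return 0;                   /* plain lookup failed: hole */
}

int main(void)
{
    struct extent map[] = { { 0, 1000, 4 }, { 8, 2000, 2 } };
    unsigned pblk = 0;
    int n = map_blocks(map, 2, 1, &pblk);
    printf("mapped %d blocks at physical %u\n", n, pblk);  /* 3 at 1001 */
    n = map_blocks(map, 2, 5, &pblk);
    printf("hole: %d blocks mapped\n", n);                 /* 0 */
    return 0;
}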
H A D | xattr.h | 11 /* Magic value in attribute blocks */ 31 __le32 h_blocks; /* number of disk blocks used */
|
H A D | migrate.c | 20 * The contiguous blocks details which can be 213 * We are freeing blocks. During this we touch extend_credit_for_blkdel() 359 * Update i_blocks with the new blocks that got ext4_ext_swap_inode_data() 361 * blocks. ext4_ext_swap_inode_data() 364 * update the original inode i_blocks for extent blocks ext4_ext_swap_inode_data() 374 * i_blocks when freeing the indirect meta-data blocks ext4_ext_swap_inode_data() 413 * Free the extent meta data blocks only 423 * No extra blocks allocated for extent meta data free_ext_block() 507 * new blocks we fail the migration. New block allocation will ext4_ext_migrate() 639 * blocks to be allocated, otherwise delayed allocation blocks may not ext4_ind_migrate()
|
H A D | extents.c | 307 * Calculate the number of metadata blocks needed 308 * to allocate @blocks 321 * previous da block, it can share index blocks with the ext4_ext_calc_metadata_amount() 323 * block every idxs leaf blocks. At idxs**2 blocks, we need ext4_ext_calc_metadata_amount() 324 * an additional index block, and at idxs**3 blocks, yet ext4_ext_calc_metadata_amount() 325 * another index block. ext4_ext_calc_metadata_amount() 345 * In the worst case we need a new set of index blocks at ext4_ext_calc_metadata_amount() 584 /* Don't cache anything if there are no external extent blocks */ ext4_ext_precache() 1016 * - allocates all needed blocks (new leaf and all intermediate index blocks) 1019 * into the newly allocated blocks 1034 ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ ext4_ext_split() 1066 * Get array to track all allocated blocks. ext4_ext_split() 1067 * We need this to handle errors and free blocks ext4_ext_split() 1074 /* allocate all needed blocks */ ext4_ext_split() 1075 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); ext4_ext_split() 1242 /* free all allocated blocks in error case */ ext4_ext_split() 1366 * so subsequent data blocks should be contiguous */ ext4_ext_create_new_leaf() 1435 /* usually extent in the path covers blocks smaller ext4_ext_search_left() 1505 /* usually extent in the path covers blocks smaller ext4_ext_search_right() 1633 /* zero-tree has no leaf blocks at all */ ext4_ext_next_leaf_block() 2424 * bitmaps and block group descriptor blocks ext4_ext_calc_credits_for_single_extent() 2425 * and other metadata blocks still need to be ext4_ext_calc_credits_for_single_extent() 2438 * How many index/leaf blocks need to change/allocate to add @extents extents? 2540 ext_debug("free last %u blocks starting %llu partial %lld\n", ext4_remove_blocks() 2549 * operation has removed all of the blocks in the cluster. ext4_remove_blocks() 2555 * removing blocks. If there's a partial cluster here it's ext4_remove_blocks() 2576 * blocks appearing between "start" and "end". Both "start" 2649 * freeing it when removing blocks. Eventually, the ext4_ext_rm_leaf() 2709 * Do not mark unwritten if all the blocks in the ext4_ext_rm_leaf() 2754 * truncated/punched region and we're done removing blocks. ext4_ext_rm_leaf() 2836 /* Leaf may not exist only if inode has no blocks at all */ ext4_ext_remove_space() 2862 * in use to avoid freeing it when removing blocks. ext4_ext_remove_space() 2901 * We start scanning from the right side, freeing all the blocks ext4_ext_remove_space() 3010 * even the first extent, then we should free the blocks in the partial ext4_ext_remove_space() 3089 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", ext4_ext_release() 3386 * - the returned value is the number of blocks beyond map->l_lblk 3432 * Attempt to transfer newly initialized blocks from the currently ext4_ext_convert_to_initialized() 3461 * A transfer of blocks from 'ex' to 'abut_ex' is allowed ext4_ext_convert_to_initialized() 3466 * - C4: abut_ex can receive the additional blocks without ext4_ext_convert_to_initialized() 3480 /* Shift the start of ex by 'map_len' blocks */ ext4_ext_convert_to_initialized() 3486 /* Extend abut_ex by 'map_len' blocks */ ext4_ext_convert_to_initialized() 3489 /* Result: number of initialized blocks past m_lblk */ ext4_ext_convert_to_initialized() 3507 * A transfer of blocks from 'ex' to 'abut_ex' is allowed ext4_ext_convert_to_initialized() 3512 * - C4: abut_ex can receive the additional blocks without ext4_ext_convert_to_initialized() 3526 /* Shift the start of abut_ex by 'map_len' blocks */ ext4_ext_convert_to_initialized() 3532 /* Extend abut_ex by 'map_len' blocks */ ext4_ext_convert_to_initialized() 3535 /* Result: number of initialized blocks past m_lblk */ ext4_ext_convert_to_initialized() 3645 * One or more index blocks may be needed if the extent tree grows after 3858 * This function is called when we are writing out the blocks that were 3862 * ('=' indicates delayed allocated blocks 3863 * '-' indicates non-delayed allocated blocks) 4023 * allocate metadata blocks for the new extent block if needed. ext4_ext_handle_unwritten_extents() 4078 * We have blocks reserved already. We ext4_ext_handle_unwritten_extents() 4079 * return allocated blocks so that delalloc ext4_ext_handle_unwritten_extents() 4100 * if we allocated more blocks than requested ext4_ext_handle_unwritten_extents() 4159 * find blocks that were already in the inode's extent tree. Hence, 4260 * return > 0, number of blocks already mapped/allocated 4261 * if create == 0 and these are pre-allocated blocks 4263 * otherwise blocks are mapped 4265 * return = 0, if plain look up failed (blocks have not been allocated) 4286 ext_debug("blocks %u/%u requested for inode %lu\n", ext4_ext_map_blocks() 4332 /* number of remaining blocks in the extent */ ext4_ext_map_blocks() 4392 /* find neighbour allocated blocks */ ext4_ext_map_blocks() 4414 * See if request is beyond maximum number of blocks we can have in ext4_ext_map_blocks() 4508 /* free data blocks we just allocated */ ext4_ext_map_blocks() 4525 * Update reserved blocks/metadata blocks after successful ext4_ext_map_blocks() 4548 * cluster has 4 blocks. Thus, the clusters ext4_ext_map_blocks() 4551 * logical blocks 10 & 11. Since there were no ext4_ext_map_blocks() 4552 * previous delayed allocated blocks in the ext4_ext_map_blocks() 4555 * * Next comes write for logical blocks 3 to 8. ext4_ext_map_blocks() 4558 * that range has delayed allocated blocks. ext4_ext_map_blocks() 4561 * time, we will first write blocks [3-8] and ext4_ext_map_blocks() 4563 * blocks. Also, we would claim all these ext4_ext_map_blocks() 4566 * blocks [10-11], we would expect to claim ext4_ext_map_blocks() 4569 * more delayed allocated blocks in the range ext4_ext_map_blocks() 4575 * allocated blocks outside of our current ext4_ext_map_blocks() 4578 * remaining blocks finally get written, we ext4_ext_map_blocks() 4588 * We will claim quota for all newly allocated blocks. ext4_ext_map_blocks() 4760 * blocks, so convert interior block aligned part of the range to ext4_zero_range() 4847 * blocks and update the inode ext4_zero_range() 4890 * of writing zeroes to the required new blocks (the same behavior which is 4905 * range since we would need to re-encrypt blocks with a ext4_fallocate() 4987 * This function converts a range of blocks to written extents 5305 * block for the file are shifted downwards by shift blocks. 5537 * @count: Number of blocks to swap 5704 * Looks scary, eh? The second inode already points to the new blocks, ext4_swap_extents()
|
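ext4_ext_calc_metadata_amount() reasons that, with idxs index entries per block, one new index block is needed about every idxs leaf blocks, another every idxs**2, and so on. A rough worst-case version of that geometric estimate; idxs is derived here from a 4 KiB block of 12-byte entries, and this is a sketch of the idea, not the kernel's function:

#include <stdio.h>

/*
 * Rough worst-case count of index blocks needed to map @leaves extent
 * leaf blocks when each index block holds @idxs entries: one level-1
 * index block per idxs leaves, one level-2 per idxs^2 leaves, etc.
 */
static unsigned index_blocks(unsigned leaves, unsigned idxs)
{
    unsigned total = 0;
    while (leaves > 1) {
        leaves = (leaves + idxs - 1) / idxs;  /* blocks one level up */
        total += leaves;
    }
    return total;
}

int main(void)
{
    /* 4 KiB block, 12-byte index entries => ~340 entries per block. */
    unsigned idxs = 4096 / 12;
    printf("%u leaves need %u index blocks\n", 100000u,
           index_blocks(100000, idxs));
    return 0;
}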
/linux-4.1.27/fs/ |
H A D | mpage.c | 37 * If a page does not map to a contiguous run of blocks then it simply falls 132 * blocks and constructs largest possible bios, submits them for IO if the 133 * blocks are not contiguous on the disk. 151 sector_t blocks[MAX_BUF_PER_PAGE]; do_mpage_readpage() local 171 * Map blocks using the result from the previous get_blocks call first. do_mpage_readpage() 186 blocks[page_block] = map_bh->b_blocknr + map_offset + do_mpage_readpage() 232 /* Contiguous blocks? */ do_mpage_readpage() 233 if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1) do_mpage_readpage() 242 blocks[page_block] = map_bh->b_blocknr+relative_block; do_mpage_readpage() 269 if (bio && (*last_block_in_bio != blocks[0] - 1)) do_mpage_readpage() 275 if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9), do_mpage_readpage() 279 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), do_mpage_readpage() 298 *last_block_in_bio = blocks[blocks_per_page - 1]; do_mpage_readpage() 322 * This function walks the pages and the blocks within each page, building and 329 * - encountering a page with non-contiguous blocks 339 * the disk mappings may require I/O. Reads of indirect blocks, for example. 341 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be 345 * because the indirect block has to be read to get the mappings of blocks 418 * If all blocks are found to be contiguous then the page can go into the 474 sector_t blocks[MAX_BUF_PER_PAGE]; __mpage_writepage() local 512 if (bh->b_blocknr != blocks[page_block-1] + 1) __mpage_writepage() 515 blocks[page_block++] = bh->b_blocknr; __mpage_writepage() 557 if (map_bh.b_blocknr != blocks[page_block-1] + 1) __mpage_writepage() 560 blocks[page_block++] = map_bh.b_blocknr; __mpage_writepage() 592 if (bio && mpd->last_block_in_bio != blocks[0] - 1) __mpage_writepage() 598 if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), __mpage_writepage() 604 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), __mpage_writepage() 633 mpd->last_block_in_bio = blocks[blocks_per_page - 1]; __mpage_writepage()
|
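do_mpage_readpage() and __mpage_writepage() both stop growing an I/O the moment blocks[page_block-1] + 1 no longer equals the next block number, so each bio covers one maximal contiguous run. The same coalescing rule in a standalone sketch (coalesce() is a hypothetical helper, with printf standing in for bio submission):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Print maximal runs of consecutive block numbers, one I/O per run. */
static void coalesce(const sector_t *blocks, int n)
{
    int start = 0;
    for (int i = 1; i <= n; i++) {
        /* Contiguous blocks? Same test as do_mpage_readpage(). */
        if (i < n && blocks[i] == blocks[i - 1] + 1)
            continue;
        printf("submit I/O: blocks %llu..%llu (%d blocks)\n",
               blocks[start], blocks[i - 1], i - start);
        start = i;
    }
}

int main(void)
{
    sector_t blocks[] = { 100, 101, 102, 200, 201, 300 };
    coalesce(blocks, 6);    /* three runs => three I/Os */
    return 0;
}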
/linux-4.1.27/fs/xfs/libxfs/ |
H A D | xfs_alloc.h | 62 * data blocks, freelist blocks, and potential file data bmap 63 * btree blocks. However, these restrictions may result in no 70 * this, we explicitly set aside a few blocks which will not be 72 * needed freelist blocks is 4 fsbs _per AG_, a potential split of file's bmap 73 * btree requires 1 fsb, so we set the number of set-aside blocks 81 * blocks in the AG - some are permanently used by metadata. These 82 * blocks are generally: 84 * - the AGF (bno and cnt) and AGI btree root blocks 85 * - 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits 88 * dependent on filesystem geometry. The others are all single blocks. 111 xfs_extlen_t minleft; /* min blocks must be left after us */ 112 xfs_extlen_t total; /* total blocks needed in xaction */ 120 char isfl; /* set if is freelist blocks - !acctg */
|
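A worked example of the set-aside arithmetic the xfs_alloc.h comment describes: a few filesystem blocks per AG are reserved for the freelist, plus slack for a bmap btree split, and these are simply subtracted from the free-block count shown to users. The constants below follow the comment's "4 fsbs per AG" figure but are still an assumption of this sketch; the authoritative values live in XFS_ALLOC_SET_ASIDE() itself:

#include <stdio.h>

/*
 * Sketch: reserve 4 freelist blocks per AG plus a small fixed slack
 * (assumed 4 blocks) for bmap btree splits, so they are never counted
 * as allocatable. Mirrors the spirit of XFS_ALLOC_SET_ASIDE().
 */
static unsigned long long set_aside(unsigned agcount)
{
    return 4 + 4ULL * agcount;
}

int main(void)
{
    unsigned long long fdblocks = 26214400;   /* free data blocks */
    unsigned agcount = 4;
    printf("user-visible free blocks: %llu\n",
           fdblocks - set_aside(agcount));
    return 0;
}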
H A D | xfs_trans_resv.c | 87 * - two inode forks containing bmap btree root blocks. 176 * And the bmap_finish transaction can free bmap blocks in a join: 177 * the agfs of the ags containing the blocks: 2 * sector size 178 * the agfls of the ags containing the blocks: 2 * sector size 202 * And the bmap_finish transaction can free the blocks and bmap blocks: 205 * the super block to reflect the freed blocks: sector size 234 * And the bmap_finish transaction can free dir and bmap blocks (two sets 235 * of bmap blocks) giving: 236 * the agf for the ags in which the blocks live: 3 * sector size 237 * the agfl for the ags in which the blocks live: 3 * sector size 273 * And the bmap_finish transaction can free some bmap blocks giving: 274 * the agf for the ag in which the blocks live: sector size 275 * the agfl for the ag in which the blocks live: sector size 311 * And the bmap_finish transaction can free the dir and bmap blocks giving: 312 * the agf for the ag in which the blocks live: 2 * sector size 313 * the agfl for the ag in which the blocks live: 2 * sector size 362 * the inode blocks allocated: mp->m_ialloc_blks * blocksize 452 * with the added blocks for remote symlink data which can be up to 1kB in 544 * In the second set of transactions (ZERO) we zero the new metadata blocks. 557 * allocating any new blocks. 562 * summary blocks: new summary size 601 * bmap blocks for the new directory block 622 * And the bmap_finish transaction can free the blocks and bmap blocks: 625 * the super block to reflect the freed blocks: sector size 667 * blocks needed for the 1st bmap, here we calculate out the space unit for 669 * to the attibute extent length in blocks by: 686 * And the bmap_finish transaction can free the attr blocks freed giving: 687 * the agf for the ag in which the blocks live: 2 * sector size 688 * the agfl for the ag in which the blocks live: 2 * sector size
|
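The reservations spelled out in xfs_trans_resv.c follow one pattern: take the worst of the transaction's possible legs, where each leg is a sum of "count of structures touched" times "structure size" terms (sector size for AGF/AGFL/superblock, block size for btree and data blocks). A sketch of the pattern with purely illustrative sizes, not real XFS geometry:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/*
 * Sketch of the reservation pattern used throughout xfs_trans_resv.c:
 * reserve for the worst of the transaction's legs, where each leg is a
 * sum of (structures touched) * (structure size). Sizes are made up.
 */
int main(void)
{
    unsigned sectsize = 512, blocksize = 4096;

    /* leg 1: allocation path - inode + 2 agf/agfl sectors + 8 blocks */
    unsigned alloc_leg = 1 * blocksize + 2 * sectsize + 8 * blocksize;

    /* leg 2: bmap_finish free path - 2 agfs + 2 agfls + superblock */
    unsigned free_leg = (2 + 2 + 1) * sectsize;

    printf("reservation = %u bytes\n", MAX(alloc_leg, free_leg));
    return 0;
}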
H A D | xfs_format.h | 100 xfs_rfsblock_t sb_dblocks; /* number of data blocks */ 101 xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */ 108 xfs_agblock_t sb_rextsize; /* realtime extent size, blocks */ 111 xfs_extlen_t sb_rbmblocks; /* number of rt bitmap blocks */ 112 xfs_extlen_t sb_logblocks; /* number of log blocks */ 134 __uint64_t sb_fdblocks; /* free data blocks */ 190 __be64 sb_dblocks; /* number of data blocks */ 191 __be64 sb_rblocks; /* number of realtime blocks */ 198 __be32 sb_rextsize; /* realtime extent size, blocks */ 201 __be32 sb_rbmblocks; /* number of rt bitmap blocks */ 202 __be32 sb_logblocks; /* number of log blocks */ 224 __be64 sb_fdblocks; /* free data blocks */ 587 __be32 agf_length; /* size in blocks of a.g. */ 591 __be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */ 598 __be32 agf_flcount; /* count of blocks in freelist */ 599 __be32 agf_freeblks; /* total free blocks */ 602 __be32 agf_btreeblks; /* # of blocks held in AGF btrees */ 670 __be32 agi_length; /* size in blocks of a.g. */ 728 * of block pointers to blocks owned by the allocation btree code. 837 __be64 di_nblocks; /* # of direct & btree blocks used */ 971 #define XFS_DIFLAG_REALTIME_BIT 0 /* file's blocks come from rt area */ 1103 __be64 d_bcount; /* disk blocks owned by the user */ 1107 __be32 d_btimer; /* similar to above; for disk blocks */ 1109 __be16 d_bwarns; /* warnings issued wrt disk blocks */ 1113 __be64 d_rtbcount; /* realtime blocks owned */ 1114 __be32 d_rtbtimer; /* similar to above; for RT disk blocks */ 1115 __be16 d_rtbwarns; /* warnings issued wrt RT disk blocks */ 1171 * by blockcount and blockno. All blocks look the same to make the code 1184 __be32 ar_blockcount; /* count of free blocks */ 1189 xfs_extlen_t ar_blockcount; /* count of free blocks */ 1272 * and the record/pointer formats for the leaf/node in the blocks. 1355 xfs_filblks_t br_blockcount; /* number of blocks */ 1416 /* sizes of CRC enabled btree blocks */
|
H A D | xfs_trans_resv.h | 73 * 2 trees * (2 blocks/level * max depth - 1) * block size 82 * dir blocks: (1 btree block per level + data block + free block) * dblock size 84 * v2 directory blocks can be fragmented below the dirblksize down to the fsb
|
H A D | xfs_da_format.h | 25 * It is used to manage a doubly linked list of all blocks at the same 28 #define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */ 48 #define XFS_DA3_NODE_MAGIC 0x3ebe /* magic number: non-leaf blocks */ 127 * - multiple data blocks, single leaf+freeindex block 128 * - data blocks, node and leaf blocks (btree), freeindex blocks 130 * Note: many node blocks structures and constants are shared with the attr 136 #define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F: free index blocks */ 153 * in the blocks rather than feature bits in the superblock. This means the code 161 * offsets of all the structures inside the blocks are different. 168 #define XFS_DIR3_FREE_MAGIC 0x58444633 /* XDF3: free index blocks */ 310 * In addition to the pure data blocks for the data and node formats, 339 * Header for the data blocks. 437 * for single-leaf (magic = XFS_DIR2_LEAF1_MAGIC) blocks only, but not present 438 * for directories with separate leaf nodes and free space blocks 856 * length and the number of blocks needed to store the attribute. This makes the 859 * the number of blocks needed to store the attribute data.
|
/linux-4.1.27/drivers/ide/ |
H A D | ide-floppy.c | 196 int blocks = blk_rq_sectors(rq) / floppy->bs_factor; idefloppy_create_rw_cmd() local 199 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks); idefloppy_create_rw_cmd() 203 put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]); idefloppy_create_rw_cmd() 354 lba_capacity = floppy->blocks * floppy->block_size; ide_floppy_get_flexible_disk_page() 360 floppy->blocks = floppy->block_size ? ide_floppy_get_flexible_disk_page() 362 drive->capacity64 = floppy->blocks * floppy->bs_factor; ide_floppy_get_flexible_disk_page() 379 int i, rc = 1, blocks, length; ide_floppy_get_capacity() local 385 floppy->blocks = 0; ide_floppy_get_capacity() 401 blocks = be32_to_cpup((__be32 *)&pc_buf[desc_start]); ide_floppy_get_capacity() 404 ide_debug_log(IDE_DBG_PROBE, "Descriptor %d: %dkB, %d blocks, " ide_floppy_get_capacity() 406 i, blocks * length / 1024, ide_floppy_get_capacity() 407 blocks, length); ide_floppy_get_capacity() 427 printk(KERN_INFO PFX "%s: %dkB, %d blocks, %d " ide_floppy_get_capacity() 429 drive->name, blocks * length / 1024, ide_floppy_get_capacity() 430 blocks, length); ide_floppy_get_capacity() 437 floppy->blocks = blocks; ide_floppy_get_capacity() 446 floppy->blocks * floppy->bs_factor; ide_floppy_get_capacity()
|
H A D | ide-gd.h | 33 int blocks, block_size, bs_factor; member in struct:ide_disk_obj
|
H A D | ide-floppy_ioctl.c | 41 int i, blocks, length, u_array_size, u_index; ide_floppy_get_format_capacities() local 75 blocks = be32_to_cpup((__be32 *)&pc_buf[desc_start]); ide_floppy_get_format_capacities() 78 if (put_user(blocks, argp)) ide_floppy_get_format_capacities() 143 int blocks, length, flags, err = 0; ide_floppy_format_unit() local 168 if (get_user(blocks, arg) || ide_floppy_format_unit() 176 ide_floppy_create_format_unit_cmd(pc, buf, blocks, length, flags); ide_floppy_format_unit()
|
/linux-4.1.27/mm/ |
H A D | dmapool.c | 12 * This allocator returns small blocks of a given size which are DMA-able by 14 * new pages, then splits them up into blocks of the required size. 19 * allocated pages. Each page in the page_list is split into blocks of at 20 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked 21 * list of free blocks within the page. Used blocks aren't tracked, but we 86 unsigned blocks = 0; show_pools() local 91 blocks += page->in_use; show_pools() 97 pool->name, blocks, show_pools() 111 * dma_pool_create - Creates a pool of consistent memory blocks, for dma. 114 * @size: size of the blocks in this pool. 115 * @align: alignment requirement for blocks; must be a power of two 116 * @boundary: returned blocks won't cross this power of two boundary 123 * cache flushing primitives. The actual size of blocks allocated may be 263 * dma_pool_destroy - destroys a pool of dma memory blocks. 491 * @size: size of the blocks in this pool. 492 * @align: alignment requirement for blocks; must be a power of two 493 * @allocation: returned blocks won't cross this boundary (or zero)
|
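The dmapool comments above describe pages carved into fixed-size blocks, with the free blocks tracked in an unsorted singly-linked list threaded through the blocks themselves. A userspace sketch of that "free list inside the storage" trick; this is the data structure only, not the kernel allocator (no DMA, no locking, and pages are never returned):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096
#define BLK_SZ  64              /* must be >= sizeof(void *) */

static void *free_head;         /* unsorted singly-linked free list */

/* Split one page into blocks, threading the free list through them. */
static void add_page(void)
{
    char *page = malloc(PAGE_SZ);
    for (int off = 0; off + BLK_SZ <= PAGE_SZ; off += BLK_SZ) {
        *(void **)(page + off) = free_head;   /* next ptr lives in block */
        free_head = page + off;
    }
}

static void *blk_alloc(void)
{
    if (!free_head)
        add_page();
    void *b = free_head;
    free_head = *(void **)b;    /* pop */
    return b;
}

static void blk_free(void *b)
{
    *(void **)b = free_head;    /* push */
    free_head = b;
}

int main(void)
{
    void *a = blk_alloc(), *b = blk_alloc();
    printf("blocks at %p and %p\n", a, b);
    blk_free(a);
    blk_free(b);
    return 0;
}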
/linux-4.1.27/fs/freevxfs/ |
H A D | vxfs.h | 83 int32_t vs_size; /* number of blocks */ 84 int32_t vs_dsize; /* number of data blocks */ 104 int32_t vs_aulen; /* length of AU in blocks */ 105 int32_t vs_auimlen; /* length of imap in blocks */ 106 int32_t vs_auemlen; /* length of emap in blocks */ 107 int32_t vs_auilen; /* length of ilist in blocks */ 108 int32_t vs_aupad; /* length of pad in blocks */ 109 int32_t vs_aublocks; /* data blocks in AU */ 126 int32_t vs_free; /* number of free blocks */ 147 int32_t vs_iausize; /* size of IAU in blocks */
|
H A D | vxfs_inode.h | 85 int32_t vt_size; /* Size in blocks */ 91 u_int64_t vd4_size; /* Size in blocks */ 128 u_int32_t vdi_blocks; /* How many blocks the inode occupies */
|
/linux-4.1.27/drivers/gpu/drm/msm/mdp/mdp5/ |
H A D | mdp5_smp.c | 31 * blocks must be allocated to that client out of the shared pool. 33 * In some hw, some blocks are statically allocated for certain pipes 41 * blocks needed per client, and request. Blocks not in use or 51 * MDP5_SMP_ALLOC registers if there are newly unused blocks 54 * client's pending blocks become its in-use blocks (and no-longer 55 * in-use blocks become available to other clients). 110 /* step #1: update # of blocks pending for the client: */ smp_request_block() 207 DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); mdp5_smp_request() 210 dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n", mdp5_smp_request() 223 /* Release SMP blocks for all clients of the pipe */ mdp5_smp_release() 294 * Figure out if there are any blocks we were previously mdp5_smp_commit() 302 /* clear released blocks: */ mdp5_smp_commit()
|
/linux-4.1.27/drivers/md/persistent-data/ |
H A D | dm-transaction-manager.h | 19 * of the on-disk data structures by limiting access to writeable blocks. 45 * unlock the superblock and flush it. No other blocks should be updated 60 * a data leak. (The other option is for tm_new_block() to zero new blocks 80 * other blocks.) 113 * requested blocks that weren't in core. This call will request those 114 * blocks to be prefetched.
|
H A D | dm-space-map-metadata.h | 18 * index entry contains allocation info about ~16k metadata blocks.
|
H A D | dm-transaction-manager.c | 28 dm_block_t blocks[PREFETCH_SIZE]; member in struct:prefetch_set 40 p->blocks[i] = PREFETCH_SENTINEL; prefetch_wipe() 54 if (p->blocks[h] == PREFETCH_SENTINEL) prefetch_add() 55 p->blocks[h] = b; prefetch_add() 67 if (p->blocks[i] != PREFETCH_SENTINEL) { prefetch_issue() 68 dm_bm_prefetch(bm, p->blocks[i]); prefetch_issue() 69 p->blocks[i] = PREFETCH_SENTINEL; prefetch_issue() 255 * New blocks count as shadows in that they don't need to be dm_tm_new_block()
|
H A D | dm-block-manager.h | 103 * this function. All dirty blocks are guaranteed to be written and flushed 106 * This method always blocks.
|
/linux-4.1.27/fs/qnx4/ |
H A D | bitmap.c | 33 printk(KERN_ERR "qnx4: I/O error in counting free blocks\n"); qnx4_count_free_blocks()
|
/linux-4.1.27/fs/quota/ |
H A D | quota_tree.h | 23 #define QT_TREEOFF 1 /* Offset of tree in file in blocks */
|
H A D | quotaio_v2.h | 35 * to blocks of these structures. 67 __le32 dqi_blocks; /* Number of blocks in file */
|
H A D | quota_v1.c | 26 static inline qsize_t v1_qbtos(qsize_t blocks) v1_qbtos() argument 28 return blocks << QUOTABLOCK_BITS; v1_qbtos() 128 ulong blocks; v1_check_quota_file() local 138 blocks = isize >> BLOCK_SIZE_BITS; v1_check_quota_file() 140 if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % v1_check_quota_file()
|
/linux-4.1.27/fs/minix/ |
H A D | itree_common.c | 327 /* Clear the ends of indirect blocks on the shared branch */ truncate() 353 unsigned blocks, res, direct = DIRECT, i = DEPTH; nblocks() local 354 blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k); nblocks() 355 res = blocks; nblocks() 356 while (--i && blocks > direct) { nblocks() 357 blocks -= direct; nblocks() 358 blocks += sb->s_blocksize/sizeof(block_t) - 1; nblocks() 359 blocks /= sb->s_blocksize/sizeof(block_t); nblocks() 360 res += blocks; nblocks()
|
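The minix nblocks() above (and its sysv twin later in this listing) sizes a truncate by charging, at each indirection level, one pointer block per ptrs blocks mapped below it. The same accounting as a standalone function; the geometry parameters in main() are illustrative:

#include <stdio.h>

/*
 * Count data blocks plus the indirect blocks needed to map them, in
 * the spirit of minix/sysv nblocks(): after the first @direct blocks,
 * every @ptrs block pointers consume one indirect block, recursively
 * up the tree (@depth levels in total).
 */
static unsigned long nblocks(unsigned long data, unsigned direct,
                             unsigned ptrs, unsigned depth)
{
    unsigned long res = data;
    while (--depth && data > direct) {
        data -= direct;
        data = (data + ptrs - 1) / ptrs;  /* pointer blocks needed */
        res += data;
        direct = 1;                       /* one slot per level above */
    }
    return res;
}

int main(void)
{
    /* 1 KiB blocks, 7 direct slots, 256 pointers per block, depth 4 */
    printf("10000 data blocks occupy %lu total blocks\n",
           nblocks(10000, 7, 256, 4));
    return 0;
}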
/linux-4.1.27/arch/s390/include/asm/ |
H A D | itcw.h | 2 * Functions for incremental construction of fcx enabled I/O control blocks.
|
/linux-4.1.27/arch/cris/arch-v10/lib/ |
H A D | dmacopy.c | 2 * memcpy for large blocks, using memory-memory DMA channels 6 and 7 in Etrax
|
/linux-4.1.27/arch/arm/mach-omap2/ |
H A D | cm44xx.h | 13 * OMAP4 has two separate CM blocks, CM1 and CM2. This file contains
|
H A D | prcm44xx.h | 14 * the PRM/CM/PRCM blocks on the OMAP4 devices: PRM, CM1, CM2,
|
/linux-4.1.27/fs/befs/ |
H A D | datastream.c | 155 * befs_count_blocks - blocks used by a file 159 * Counts the number of fs blocks that the file represented by 168 befs_blocknr_t blocks; befs_count_blocks() local 169 befs_blocknr_t datablocks; /* File data blocks */ befs_count_blocks() 170 befs_blocknr_t metablocks; /* FS metadata blocks */ befs_count_blocks() 186 Double indir block, plus all the indirect blocks it maps befs_count_blocks() 188 BEFS_DBLINDIR_BRUN_LEN blocks long. Therefore, we know befs_count_blocks() 190 and from that we know how many indirect blocks it takes to befs_count_blocks() 191 map them. We assume that the indirect blocks are also befs_count_blocks() 192 BEFS_DBLINDIR_BRUN_LEN blocks long. befs_count_blocks() 209 blocks = datablocks + metablocks; befs_count_blocks() 210 befs_debug(sb, "<--- %s %u blocks", __func__, (unsigned int)blocks); befs_count_blocks() 212 return blocks; befs_count_blocks() 232 keeps a count of the number of blocks searched so far (sum), 326 /* Examine blocks of the indirect run one at a time */ befs_find_brun_indirect() 387 They are always allocated 4 fs blocks at a time, so each 399 Oh, and once we've done that, we actually read in the blocks 401 though the double-indirect run may be several blocks long, 402 we can calculate which of those blocks will contain the index 438 /* number of data blocks mapped by each of the iaddrs in befs_find_brun_dblindirect() 443 /* number of data blocks mapped by each of the iaddrs in befs_find_brun_dblindirect() 451 /* First, discover which of the double_indir->indir blocks befs_find_brun_dblindirect()
|
H A D | befs_fs_types.h | 33 /* The datastream blocks mapped by the double-indirect 34 * block are always 4 fs blocks long. 36 * the potentially huge number of indirect blocks 38 * Err. Should that be 4 fs blocks or 4k???
|
/linux-4.1.27/sound/soc/intel/common/ |
H A D | sst-firmware.c | 443 /* remove all scratch blocks */ sst_fw_unload() 616 /* allocate first free DSP blocks for data - callers hold locks */ block_alloc() 626 /* find first free whole blocks that can hold module */ block_alloc() 629 /* ignore blocks with wrong type */ block_alloc() 645 /* then find free multiple blocks that can hold module */ block_alloc() 648 /* ignore blocks with wrong type */ block_alloc() 652 /* do we span > 1 block? */ block_alloc() 681 dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret); sst_alloc_blocks() 685 /* prepare DSP blocks for module usage */ sst_alloc_blocks() 705 /* allocate memory blocks for static module addresses - callers hold locks */ block_alloc_fixed() 714 /* only IRAM/DRAM blocks are managed */ block_alloc_fixed() 718 /* are blocks already attached to this module */ list_for_each_entry_safe() 721 /* ignore blocks with wrong type */ list_for_each_entry_safe() 741 /* module already owns blocks */ list_for_each_entry_safe() 746 /* find first free blocks that can hold section in free list */ 750 /* ignore blocks with wrong type */ 786 /* Load fixed module data into DSP memory blocks */ sst_module_alloc_blocks() 804 /* alloc blocks that include this section */ sst_module_alloc_blocks() 808 "error: no free blocks for section at offset 0x%x size 0x%x\n", sst_module_alloc_blocks() 814 /* prepare DSP blocks for module copy */ sst_module_alloc_blocks() 821 /* copy partial module data to blocks */ sst_module_alloc_blocks() 882 /* alloc blocks that include this section */ sst_module_runtime_alloc_blocks() 889 /* alloc blocks that include this section */ sst_module_runtime_alloc_blocks() 894 "error: no free blocks for runtime module size 0x%x\n", sst_module_runtime_alloc_blocks() 901 /* prepare DSP blocks for module copy */ sst_module_runtime_alloc_blocks() 1048 /* unregister all DSP memory blocks */ sst_mem_block_unregister_all() 1055 /* unregister used blocks */ sst_mem_block_unregister_all() 1061 /* unregister free blocks */ sst_mem_block_unregister_all() 1071 /* allocate scratch buffer blocks */ sst_block_alloc_scratch() 1098 /* allocate blocks for module scratch buffers */ sst_block_alloc_scratch() 1099 dev_dbg(dsp->dev, "allocating scratch blocks\n"); sst_block_alloc_scratch() 1112 /* alloc blocks that include this section */ sst_block_alloc_scratch() 1123 dev_err(dsp->dev, "error: can't alloc scratch blocks\n"); sst_block_alloc_scratch() 1142 /* free all scratch blocks */ sst_block_free_scratch()
|
H A D | sst-dsp-priv.h | 140 * Block Allocator - Used to allocate blocks of DSP memory. 162 struct list_head block_list; /* list of blocks used */ 189 * ADSP memory blocks. The simplest FW will be a file with 1 module. A module 212 struct list_head block_list; /* Module list of blocks in use */ 232 * SST ADP memory has multiple IRAM and DRAM blocks. Some ADSP blocks can be 252 struct list_head module_list; /* Module list of blocks */ 253 struct list_head list; /* Map list of free/used blocks */ 271 /* list of free and used ADSP memory blocks */ 361 /* Register the DSPs memory blocks - would be nice to read from ACPI */
|
/linux-4.1.27/drivers/mtd/tests/ |
H A D | speedtest.c | 54 static int multiblock_erase(int ebnum, int blocks) multiblock_erase() argument 63 ei.len = mtd->erasesize * blocks; multiblock_erase() 67 pr_err("error %d while erasing EB %d, blocks %d\n", multiblock_erase() 68 err, ebnum, blocks); multiblock_erase() 74 "blocks %d\n", ebnum, blocks); multiblock_erase() 195 int err, i, blocks, j, k; mtd_speedtest_init() local 391 blocks = 1 << k; mtd_speedtest_init() 393 blocks); mtd_speedtest_init() 396 for (j = 0; j < blocks && (i + j) < ebcnt; j++) mtd_speedtest_init() 416 blocks, speed); mtd_speedtest_init()
|
/linux-4.1.27/fs/sysv/ |
H A D | itree.c | 4 * Handling of indirect blocks' trees. 402 /* Clear the ends of indirect blocks on the shared branch */ sysv_truncate() 432 unsigned blocks, res, direct = DIRECT, i = DEPTH; sysv_nblocks() local 433 blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits; sysv_nblocks() 434 res = blocks; sysv_nblocks() 435 while (--i && blocks > direct) { sysv_nblocks() 436 blocks = ((blocks - direct - 1) >> ptrs_bits) + 1; sysv_nblocks() 437 res += blocks; sysv_nblocks() 440 return blocks; sysv_nblocks() 447 stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size); sysv_getattr()
|
H A D | balloc.c | 19 * This file contains code for allocating/freeing blocks. 46 sysv_zone_t *blocks = sbi->s_bcache; sysv_free_block() local 85 memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t)); sysv_free_block() local 164 sysv_zone_t *blocks; sysv_count_free_blocks() local 185 blocks = sbi->s_bcache; sysv_count_free_blocks() 191 while (n && (zone = blocks[--n]) != 0) sysv_count_free_blocks() 207 blocks = get_chunk(sb, bh); sysv_count_free_blocks()
|
/linux-4.1.27/fs/logfs/ |
H A D | logfs_abi.h | 25 * Throughout the logfs code, we're constantly dealing with blocks at 38 * 1 - i1 indirect blocks 39 * 2 - i2 indirect blocks 40 * 3 - i3 indirect blocks 41 * 4 - i4 indirect blocks 42 * 5 - i5 indirect blocks 43 * 6 - ifile data blocks 44 * 7 - ifile i1 indirect blocks 45 * 8 - ifile i2 indirect blocks 46 * 9 - ifile i3 indirect blocks 47 * 10 - ifile i4 indirect blocks 48 * 11 - ifile i5 indirect blocks 50 * 12 - gc recycled blocks, long-lived data 51 * 13 - replacement blocks, short-lived data 58 * used to replace older blocks in existing files is expected to be 70 * by something superblock-dependent. Pointers in indirect blocks are and 82 * Number of blocks at various levels of indirection. There are 16 direct 122 * Data blocks reside on level 0, 1x indirect block on level 1, etc. 123 * Inodes reside on level 6, indirect blocks for the inode file on levels 7-11. 127 * LOGFS_MAX_INDIRECT is the maximal indirection through indirect blocks, 220 * @ds_bad_seg_reserve: number of segments reserved to handle bad blocks
|
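To see why five levels of indirection suffice in the logfs layout above, note that each level multiplies the reachable block count by the number of pointers an indirect block holds. A sketch of that arithmetic, assuming one iN pointer per level purely to show the geometric growth; the 8-byte pointer size is an assumption of this sketch, not taken from the header:

#include <stdio.h>

int main(void)
{
    unsigned long long direct = 16;       /* direct pointers, per the comment */
    unsigned long long ptrs = 4096 / 8;   /* assumed: 8-byte block pointers */
    unsigned long long reach = direct, level_cov = 1;

    for (int level = 1; level <= 5; level++) {
        level_cov *= ptrs;   /* data blocks behind one iN pointer */
        reach += level_cov;
        printf("i%d indirect: file can reach %llu blocks\n",
               level, reach);
    }
    return 0;
}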
/linux-4.1.27/crypto/ |
H A D | sha1_generic.c | 30 int blocks) sha1_generic_block_fn() 34 while (blocks--) { sha1_generic_block_fn() 29 sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, int blocks) sha1_generic_block_fn() argument
|
/linux-4.1.27/arch/m32r/include/uapi/asm/ |
H A D | stat.h | 68 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 70 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
|
/linux-4.1.27/net/core/ |
H A D | netevent.c | 58 * call_netevent_notifiers - call all netevent notifier blocks 62 * Call all neighbour notifier blocks. Parameters and return value
|
/linux-4.1.27/sound/firewire/ |
H A D | amdtp.h | 47 * @CIP_SFC_32000: 32,000 data blocks 48 * @CIP_SFC_44100: 44,100 data blocks 49 * @CIP_SFC_48000: 48,000 data blocks 50 * @CIP_SFC_88200: 88,200 data blocks 51 * @CIP_SFC_96000: 96,000 data blocks 52 * @CIP_SFC_176400: 176,400 data blocks 53 * @CIP_SFC_192000: 192,000 data blocks 59 * represents the number of data blocks transferred per second in an AMDTP
|
/linux-4.1.27/sound/pci/ctxfi/ |
H A D | ctvmem.h | 48 struct list_head unused; /* List of unused blocks */ 49 struct list_head used; /* List of used blocks */
|
/linux-4.1.27/sound/soc/intel/baytrail/ |
H A D | sst-baytrail-dsp.c | 60 u32 blocks; /* # of blocks */ member in struct:sst_byt_fw_module_header 83 for (count = 0; count < module->blocks; count++) { sst_byt_parse_module() 248 int blocks; member in struct:sst_adsp_memregion 329 /* register DSP memory blocks - ideally we should get this from ACPI */ sst_byt_init() 332 size = (region[i].end - region[i].start) / region[i].blocks; sst_byt_init() 334 /* register individual memory blocks */ sst_byt_init() 335 for (j = 0; j < region[i].blocks; j++) { sst_byt_init()
|
/linux-4.1.27/include/linux/mtd/ |
H A D | inftl.h | 44 unsigned int nb_blocks; /* number of physical blocks */ 45 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
|
H A D | nftl.h | 50 unsigned int nb_blocks; /* number of physical blocks */ 51 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
|
H A D | bbm.h | 44 * @maxblocks: maximum number of blocks to search for a bbt. This number of 45 * blocks is reserved at the end of the device where the tables are 50 * bad blocks, can be NULL, if len = 0 53 * pattern which identifies good and bad blocks. The assumption is made 125 /* The maximum number of blocks to scan for a bbt */
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/ |
H A D | iwl-agn-hw.h | 106 /* high blocks contain PAPD data */ 109 #define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ 110 #define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ 111 #define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ 112 #define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
|
/linux-4.1.27/kernel/ |
H A D | groups.c | 29 group_info->blocks[0] = group_info->small_block; groups_alloc() 36 group_info->blocks[i] = b; groups_alloc() 43 free_page((unsigned long)group_info->blocks[i]); groups_alloc() 53 if (group_info->blocks[0] != group_info->small_block) { groups_free() 56 free_page((unsigned long)group_info->blocks[i]); groups_free()
|
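groups_alloc()/groups_free() above keep gids in page-sized blocks, with blocks[0] pointing at the inline small_block so that small sets need no extra page. Lookup is then plain two-level indexing, as in the kernel's GROUP_AT() macro of this era; a compact sketch with illustrative sizes:

#include <stdio.h>

#define NGROUPS_PER_BLOCK 1024  /* assumed: PAGE_SIZE / sizeof(gid) */

struct group_info {
    int ngroups;
    unsigned *blocks[8];        /* pointers to gid blocks */
    unsigned small_block[2];    /* inline storage for tiny sets */
};

/* Two-level lookup, like the kernel's GROUP_AT() macro. */
static unsigned group_at(struct group_info *gi, int i)
{
    return gi->blocks[i / NGROUPS_PER_BLOCK][i % NGROUPS_PER_BLOCK];
}

int main(void)
{
    struct group_info gi = { .ngroups = 2 };
    gi.blocks[0] = gi.small_block;  /* blocks[0] aliases small_block */
    gi.small_block[0] = 100;
    gi.small_block[1] = 4242;
    printf("group 1 = %u\n", group_at(&gi, 1));
    return 0;
}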
/linux-4.1.27/include/linux/mfd/ |
H A D | stmpe.h | 115 extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); 116 extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); 164 * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*) 175 unsigned int blocks; member in struct:stmpe_platform_data
|
/linux-4.1.27/fs/hfs/ |
H A D | hfs.h | 12 /* offsets to various blocks */ 17 /* magic numbers for various disk blocks */ 45 #define HFS_BAD_CNID 5 /* BAD blocks file */ 161 __be16 FABN; /* allocation blocks number*/ 246 __be16 drVBMSt; /* location (in 512-byte blocks) 248 __be16 drAllocPtr; /* location (in allocation blocks) 250 __be16 drNmAlBlks; /* number of allocation blocks */ 254 __be16 drAlBlSt; /* location (in 512-byte blocks) 258 __be16 drFreeBks; /* number of free allocation blocks */ 272 and number of allocation blocks
|
H A D | part_tbl.c | 18 * block) containing one of these structures. These blocks are 24 __be32 pmMapBlkCnt; /* partition blocks count */
|
/linux-4.1.27/fs/squashfs/ |
H A D | file.c | 26 * consists of a sequence of contiguous compressed blocks, and/or a 29 * file inode (itself stored in one or more compressed metadata blocks). 37 * is split into slots, caching up to eight 224 GiB files (128 KiB blocks). 170 * Read the next n blocks from the block list, starting from 186 int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); read_indexes() local 189 offset, blocks << 2); read_indexes() 196 for (i = 0; i < blocks; i++) { read_indexes() 200 n -= blocks; read_indexes() 219 * the number of metadata blocks that need to be read fits into the cache. 223 static inline int calculate_skip(int blocks) calculate_skip() argument 225 int skip = blocks / ((SQUASHFS_META_ENTRIES + 1) calculate_skip() 284 int blocks = skip * SQUASHFS_META_INDEXES; fill_meta_index() local 285 long long res = read_indexes(inode->i_sb, blocks, fill_meta_index()
|
/linux-4.1.27/drivers/net/wireless/ti/wlcore/ |
H A D | tx.h | 65 * Number of extra memory blocks to allocate for this packet 66 * in addition to the number of blocks derived from the packet 71 * Total number of memory blocks allocated by the host for 73 * blocks number allocated by HW. 80 * Total number of memory blocks allocated by the host for 87 * of SDIO blocks. 94 * Total number of memory blocks allocated by the host for
|
/linux-4.1.27/fs/ocfs2/ |
H A D | journal.h | 232 * the number of blocks that will be changed during 304 /* quota blocks */ 425 * worth of blocks for initial extent. */ ocfs2_add_dir_index_credits() 431 * alloc group descriptor + mkdir/symlink blocks + dir blocks + xattr 432 * blocks + quota update */ ocfs2_mknod_credits() 523 /* 2 metadata alloc, 2 new blocks and root refcount block */ 547 /* this does not include *new* metadata blocks, which are ocfs2_calc_extend_credits() 549 * prev. last_eb_blk + blocks along edge of tree. ocfs2_calc_extend_credits() 560 int blocks = ocfs2_mknod_credits(sb, 0, 0); ocfs2_calc_symlink_credits() local 564 blocks += ocfs2_clusters_to_blocks(sb, 1); ocfs2_calc_symlink_credits() 566 return blocks + ocfs2_quota_trans_credits(sb); ocfs2_calc_symlink_credits() 572 int blocks; ocfs2_calc_group_alloc_credits() local 575 + bitmap blocks affected */ ocfs2_calc_group_alloc_credits() 576 blocks = 1 + 1 + 1 + bitmap_blocks; ocfs2_calc_group_alloc_credits() 577 return blocks; ocfs2_calc_group_alloc_credits() 606 /* We may be deleting metadata blocks, so metadata alloc dinode + ocfs2_calc_tree_trunc_credits()
|
H A D | blockcheck.h | 27 u64 b_check_count; /* Number of blocks we've checked */ 29 u64 b_recover_count; /* Number of blocks fixed by ecc */
|
H A D | ocfs2_fs.h | 40 * OCFS2_SUPER_BLOCK_BLKNO is in blocks, not sectors. eg, for a 142 /* Support for data packed into inode blocks */ 479 __le64 e_blkno; /* Physical disk offset, in blocks */ 486 __le64 c_blkno; /* Physical disk offset (blocks) of 1st group */ 554 __le64 h_blkno; /* Offset on disk, in blocks */ 559 __le64 h_next_leaf_blk; /* Offset on disk, in blocks, 637 __le64 s_root_blkno; /* Offset, in blocks, of root directory 639 /*30*/ __le64 s_system_dir_blkno; /* Offset, in blocks, of system 722 /*50*/ __le64 i_blkno; /* Offset on disk, in blocks */ 812 /*20*/ __le64 db_blkno; /* Offset on disk, in blocks */ 814 blocks */ 823 * We also store name_len here so as to reduce the number of leaf blocks we 863 __le64 dr_blkno; /* Offset on disk, in blocks */ 901 __le64 dl_blkno; /* Offset on disk, in blocks */ 930 blocks */ 932 blocks */ 933 __le64 bg_blkno; /* Offset on disk, in blocks */ 949 * 2048 blocks (256 bytes of bg_bitmap). This 989 /*10*/ __le64 rf_blkno; /* Offset on disk, in blocks */ 1113 /*10*/ __le64 xb_blkno; /* Offset on disk, in blocks */ 1194 __le32 dqi_blocks; /* Number of blocks in quota file */ 1244 __le32 dqi_blocks; /* Number of blocks allocated for quota file */
|
/linux-4.1.27/drivers/net/wireless/rsi/ |
H A D | rsi_91x_usb_ops.c | 25 * file and writing their values in blocks of data. 29 * @num_blocks: Number of blocks to be written to the card. 127 * file and writing their value in blocks of data. 163 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); rsi_load_ta_instructions()
|
/linux-4.1.27/fs/hfsplus/ |
H A D | part_tbl.c | 20 /* offsets to various blocks */ 25 /* magic numbers for various disk blocks */ 36 * block) containing one of these structures. These blocks are 42 __be32 pmMapBlkCnt; /* partition blocks count */
|
/linux-4.1.27/fs/nilfs2/ |
H A D | segbuf.h | 35 * @nblocks: Number of blocks included in the partial segment 36 * @nsumblk: Number of summary blocks 38 * @nfileblk: Total number of file blocks 67 * @sb_rest_blocks: Number of residual blocks in the current segment
|
H A D | gcinode.c | 2 * gcinode.c - dummy inodes to buffer blocks for garbage collection 26 * This file adds the cache of on-disk blocks to be moved in garbage 27 * collection. The disk blocks are held with dummy inodes (called 32 * time after they are copied to a new log. Dirty blocks made on the 33 * current generation and the blocks to be moved by GC never overlap 34 * because the dirty blocks make a new generation; they rather must be
|
H A D | the_nilfs.h | 53 * @ns_sbh: buffer heads of on-disk super blocks 67 * @ns_ndirtyblks: Number of dirty data blocks 83 * @ns_gc_inodes: dummy inodes to keep live blocks 87 * @ns_resuid: uid for reserved blocks 88 * @ns_resgid: gid for reserved blocks 94 * @ns_blocks_per_segment: number of blocks per segment 247 * @blocks_count: number of blocks
|
H A D | segment.h | 95 * @sc_gc_inodes: List of GC inodes having blocks to be written 105 * @sc_segbuf_nblocks: Number of available blocks in segment buffers. 112 * @sc_nblk_this_inc: Number of blocks included in the current logical segment 192 NILFS_SC_DIRTY, /* One or more dirty meta-data blocks exist */ 215 #define NILFS_SC_DEFAULT_TIMEOUT 5 /* Timeout value of dirty blocks.
|
H A D | file.c | 35 * timing for both data blocks and intermediate blocks. nilfs_sync_file() 104 * fill hole blocks nilfs_page_mkwrite()
|
H A D | recovery.c | 95 * nilfs_compute_checksum - compute checksum of blocks continuously 102 * @nblock: number of blocks to be checked 230 /* This limits the number of blocks read in the CRC check */ nilfs_validate_log() 248 * nilfs_read_summary_info - read an item on summary blocks of a log 250 * @pbh: the current buffer head on summary blocks [in, out] 251 * @offset: the current byte offset on summary blocks [in, out] 277 * nilfs_skip_summary_info - skip items on summary blocks of a log 279 * @pbh: the current buffer head on summary blocks [in, out] 280 * @offset: the current byte offset on summary blocks [in, out] 594 LIST_HEAD(dsync_blocks); /* list of data blocks to be recovered */ nilfs_do_roll_forward() 684 printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n", nilfs_do_roll_forward()
|
/linux-4.1.27/include/linux/spi/ |
H A D | flash.h | 18 * Note that for DataFlash, sizes for pages, blocks, and sectors are
|
/linux-4.1.27/arch/blackfin/include/uapi/asm/ |
H A D | stat.h | 55 long long st_blocks; /* Number 512-byte blocks allocated. */
|
/linux-4.1.27/drivers/mmc/card/ |
H A D | block.c | 152 packed->blocks = 0; mmc_blk_clear_packed() 356 idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; mmc_blk_ioctl_copy_from_user() 496 data.blocks = idata->ic.blocks; mmc_blk_ioctl_cmd() 543 err = mmc_set_blockcount(card, data.blocks, mmc_blk_ioctl_cmd() 678 __be32 *blocks; mmc_sd_num_wr_blocks() local 703 data.blocks = 1; mmc_sd_num_wr_blocks() 712 blocks = kmalloc(4, GFP_KERNEL); mmc_sd_num_wr_blocks() 713 if (!blocks) mmc_sd_num_wr_blocks() 716 sg_init_one(&sg, blocks, 4); mmc_sd_num_wr_blocks() 720 result = ntohl(*blocks); mmc_sd_num_wr_blocks() 721 kfree(blocks); mmc_sd_num_wr_blocks() 1185 brq->data.blocks = 1; mmc_apply_rel_rw() 1187 if (brq->data.blocks > card->ext_csd.rel_sectors) mmc_apply_rel_rw() 1188 brq->data.blocks = card->ext_csd.rel_sectors; mmc_apply_rel_rw() 1189 else if (brq->data.blocks < card->ext_csd.rel_sectors) mmc_apply_rel_rw() 1190 brq->data.blocks = 1; mmc_apply_rel_rw() 1295 if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) mmc_blk_err_check() 1349 packed->blocks, packed->idx_failure); mmc_blk_packed_err_check() 1391 brq->data.blocks = blk_rq_sectors(req); mmc_blk_rw_rq_prep() 1398 if (brq->data.blocks > card->host->max_blk_count) mmc_blk_rw_rq_prep() 1399 brq->data.blocks = card->host->max_blk_count; mmc_blk_rw_rq_prep() 1401 if (brq->data.blocks > 1) { mmc_blk_rw_rq_prep() 1408 brq->data.blocks = 1; mmc_blk_rw_rq_prep() 1415 brq->data.blocks = card->host->ops->multi_io_quirk(card, mmc_blk_rw_rq_prep() 1418 brq->data.blocks); mmc_blk_rw_rq_prep() 1421 if (brq->data.blocks > 1 || do_rel_wr) { mmc_blk_rw_rq_prep() 1459 ((brq->data.blocks * brq->data.blksz) >= mmc_blk_rw_rq_prep() 1484 brq->sbc.arg = brq->data.blocks | mmc_blk_rw_rq_prep() 1500 if (brq->data.blocks != blk_rq_sectors(req)) { mmc_blk_rw_rq_prep() 1501 int i, data_size = brq->data.blocks << 9; mmc_blk_rw_rq_prep() 1662 packed->blocks = 0; mmc_blk_packed_hdr_wrq_prep() 1679 ((brq->data.blocks * brq->data.blksz) >= mmc_blk_packed_hdr_wrq_prep() 1690 packed->blocks += blk_rq_sectors(prq); mmc_blk_packed_hdr_wrq_prep() 1701 brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); mmc_blk_packed_hdr_wrq_prep() 1711 brq->data.blocks = packed->blocks + hdr_blocks; mmc_blk_packed_hdr_wrq_prep() 1745 u32 blocks; mmc_blk_cmd_err() local 1747 blocks = mmc_sd_num_wr_blocks(card); mmc_blk_cmd_err() 1748 if (blocks != (u32)-1) { mmc_blk_cmd_err() 1749 ret = blk_end_request(req, 0, blocks << 9); mmc_blk_cmd_err() 1852 * When 4KB native sector is enabled, only 8 blocks mmc_blk_issue_rw_rq() 1855 if ((brq->data.blocks & 0x07) && mmc_blk_issue_rw_rq() 1940 if (brq->data.blocks > 1) { mmc_blk_issue_rw_rq()
|
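mmc_sd_num_wr_blocks() above issues a one-block, 4-byte read whose payload is the count of well-written blocks in big-endian (card) order, hence the ntohl(*blocks) on the DMA buffer. A small userspace decoding sketch; the payload bytes are made up:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>  /* ntohl */

int main(void)
{
    /* 4-byte big-endian payload as the card might return it. */
    uint8_t payload[4] = { 0x00, 0x00, 0x01, 0x80 };
    uint32_t raw, be = 0;

    for (int i = 0; i < 4; i++)
        be = (be << 8) | payload[i];  /* portable decode: 384 */

    memcpy(&raw, payload, 4);
    /* What the driver does with its DMA buffer: ntohl(*blocks). */
    printf("well-written blocks: %u (ntohl gives %u)\n", be, ntohl(raw));
    return 0;
}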
H A D | mmc_test.c | 70 * @blocks: number of (512 byte) blocks currently mapped by @sg 81 unsigned int blocks; member in struct:mmc_test_area 192 unsigned dev_addr, unsigned blocks, unsigned blksz, int write) mmc_test_prepare_mrq() 196 if (blocks > 1) { mmc_test_prepare_mrq() 210 if (blocks == 1) mmc_test_prepare_mrq() 219 mrq->data->blocks = blocks; mmc_test_prepare_mrq() 680 if (mrq->data->blocks > 1) { mmc_test_prepare_broken_mrq() 709 mrq->data->blocks * mrq->data->blksz) mmc_test_check_result() 749 if (mrq->data->blocks > 1) { mmc_test_check_broken_result() 782 unsigned dev_addr, unsigned blocks, mmc_test_nonblock_transfer() 815 blocks, blksz, write); mmc_test_nonblock_transfer() 832 dev_addr += blocks; mmc_test_nonblock_transfer() 847 unsigned blocks, unsigned blksz, int write) mmc_test_simple_transfer() 859 blocks, blksz, write); mmc_test_simple_transfer() 872 unsigned blocks, unsigned blksz, int write) mmc_test_broken_transfer() 885 sg_init_one(&sg, test->buffer, blocks * blksz); mmc_test_broken_transfer() 887 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); mmc_test_broken_transfer() 904 unsigned blocks, unsigned blksz, int write) mmc_test_transfer() 910 for (i = 0;i < blocks * blksz;i++) mmc_test_transfer() 924 blocks, blksz, write); mmc_test_transfer() 935 sectors = (blocks * blksz + 511) / 512; mmc_test_transfer() 936 if ((sectors * 512) == (blocks * blksz)) mmc_test_transfer() 952 for (i = 0;i < blocks * blksz;i++) { mmc_test_transfer() 965 for (i = 0;i < blocks * blksz;i++) { mmc_test_transfer() 1430 t->blocks = sz >> 9; mmc_test_area_map() 1455 t->blocks, 512, write); mmc_test_area_transfer() 1495 dev_addr, t->blocks, 512, write, count); mmc_test_area_io_seq() 190 mmc_test_prepare_mrq(struct mmc_test_card *test, struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) mmc_test_prepare_mrq() argument 780 mmc_test_nonblock_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write, int count) mmc_test_nonblock_transfer() argument 845 mmc_test_simple_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) mmc_test_simple_transfer() argument 871 mmc_test_broken_transfer(struct mmc_test_card *test, unsigned blocks, unsigned blksz, int write) mmc_test_broken_transfer() argument 902 mmc_test_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) mmc_test_transfer() argument
|
/linux-4.1.27/arch/x86/kernel/cpu/mcheck/ |
H A D | mce_amd.c | 512 if (per_cpu(threshold_banks, cpu)[bank]->blocks) { allocate_threshold_blocks() 514 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); allocate_threshold_blocks() 516 per_cpu(threshold_banks, cpu)[bank]->blocks = b; allocate_threshold_blocks() 554 struct list_head *head = &b->blocks->miscj; __threshold_add_blocks() 559 err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name); __threshold_add_blocks() 674 list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) { deallocate_threshold_block() 680 kfree(per_cpu(threshold_banks, cpu)[bank]->blocks); deallocate_threshold_block() 681 per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; deallocate_threshold_block() 691 list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj) __threshold_remove_blocks() 704 if (!b->blocks) threshold_remove_bank()
|
/linux-4.1.27/fs/xfs/ |
H A D | xfs_mount.h | 62 uint m_readio_blocks; /* min read size blocks */ 64 uint m_writeio_blocks; /* min write size blocks */ 103 int m_ialloc_blks; /* blocks in inode allocation */ 108 __uint64_t m_resblks; /* total reserved blocks */ 109 __uint64_t m_resblks_avail;/* available reserved blocks */ 122 struct delayed_work m_eofblocks_work; /* background eof blocks 141 * reference the newly added blocks. Does not need to be persistent 159 #define XFS_MOUNT_DISCARD (1ULL << 5) /* discard unused blocks */ 276 __uint32_t pagf_flcount; /* count of blocks in freelist */ 277 xfs_extlen_t pagf_freeblks; /* total free blocks */ 279 __uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
|
H A D | xfs_rtalloc.c | 193 * If there are blocks not being allocated at the front of the xfs_rtallocate_range() 205 * If there are blocks not being allocated at the end of the xfs_rtallocate_range() 470 * Loop over all bitmap blocks (bbno + i is current block). xfs_rtallocate_extent_near() 514 * Loop backwards through the bitmap blocks from xfs_rtallocate_extent_near() 555 * There weren't intervening bitmap blocks xfs_rtallocate_extent_near() 579 * still more blocks on the negative side, go there. xfs_rtallocate_extent_near() 591 * blocks to go, go there. The 0 case moves to block 1. xfs_rtallocate_extent_near() 596 * If negative or 0 and there are more negative blocks, xfs_rtallocate_extent_near() 640 * At each level, look at all the bitmap blocks, to see if there xfs_rtallocate_extent_size() 647 * Loop over all the bitmap blocks. xfs_rtallocate_extent_size() 688 * Didn't find any maxlen blocks. Try smaller ones, unless xfs_rtallocate_extent_size() 705 * Loop over all the bitmap blocks, try an allocation xfs_rtallocate_extent_size() 763 xfs_extlen_t oblocks, /* old count of blocks */ xfs_growfs_rt_alloc() 764 xfs_extlen_t nblocks, /* new count of blocks */ xfs_growfs_rt_alloc() 773 xfs_bmap_free_t flist; /* list of freed blocks */ xfs_growfs_rt_alloc() 804 * Allocate blocks to the bitmap file. xfs_growfs_rt_alloc() 816 * Free any blocks freed up in the transaction, then commit. xfs_growfs_rt_alloc() 825 * Now we need to clear the allocated blocks. xfs_growfs_rt_alloc() 893 xfs_rfsblock_t nrblocks; /* new number of realtime blocks */ xfs_growfs_rt() 894 xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */ xfs_growfs_rt() 897 xfs_extlen_t nrsumblocks; /* new number of summary blocks */ xfs_growfs_rt() 901 xfs_extlen_t rbmblocks; /* current number of rt bitmap blocks */ xfs_growfs_rt() 966 * Loop over the bitmap blocks. xfs_growfs_rt() 1069 * Mark more blocks free in the superblock. xfs_growfs_rt()
|
/linux-4.1.27/drivers/staging/rtl8192e/ |
H A D | rtllib_crypt_ccmp.c | 224 int blocks, last, len; rtllib_ccmp_encrypt() local 236 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; rtllib_ccmp_encrypt() 239 for (i = 1; i <= blocks; i++) { rtllib_ccmp_encrypt() 240 len = (i == blocks && last) ? last : AES_BLOCK_LEN; rtllib_ccmp_encrypt() 316 int i, blocks, last, len; rtllib_ccmp_decrypt() local 322 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; rtllib_ccmp_decrypt() 325 for (i = 1; i <= blocks; i++) { rtllib_ccmp_decrypt() 326 len = (i == blocks && last) ? last : AES_BLOCK_LEN; rtllib_ccmp_decrypt()
|
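This CCMP implementation (and the near-identical rtl8192u and lib80211 copies later in this listing) rounds the payload up to whole AES blocks and shortens only the final one: blocks = ceil(data_len / 16) and len = (i == blocks && last) ? last : AES_BLOCK_LEN. That loop in isolation, with printf standing in for the real AES-CTR/CBC-MAC steps:

#include <stdio.h>
#include <string.h>

#define AES_BLOCK_LEN 16

/* Walk @data_len bytes 16 at a time, trimming the final block. */
static void per_block(unsigned char *pos, int data_len)
{
    int last = data_len % AES_BLOCK_LEN;
    int blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;

    for (int i = 1; i <= blocks; i++) {
        int len = (i == blocks && last) ? last : AES_BLOCK_LEN;
        /* The drivers run AES-CTR/CBC-MAC here; we just report it. */
        printf("block %d: %d bytes\n", i, len);
        pos += len;
    }
}

int main(void)
{
    unsigned char payload[42];
    memset(payload, 0, sizeof(payload));
    per_block(payload, sizeof(payload));  /* 16 + 16 + 10 */
    return 0;
}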
/linux-4.1.27/drivers/staging/rtl8192u/ieee80211/ |
H A D | ieee80211_crypt_ccmp.c | 234 int blocks, last, len; ieee80211_ccmp_encrypt() local 246 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; ieee80211_ccmp_encrypt() 249 for (i = 1; i <= blocks; i++) { ieee80211_ccmp_encrypt() 250 len = (i == blocks && last) ? last : AES_BLOCK_LEN; ieee80211_ccmp_encrypt() 332 int i, blocks, last, len; ieee80211_ccmp_decrypt() local 338 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; ieee80211_ccmp_decrypt() 341 for (i = 1; i <= blocks; i++) { ieee80211_ccmp_decrypt() 342 len = (i == blocks && last) ? last : AES_BLOCK_LEN; ieee80211_ccmp_decrypt()
|
/linux-4.1.27/arch/mips/bcm63xx/ |
H A D | setup.c | 36 /* soft reset all blocks */ bcm6348_a1_reboot() 37 printk(KERN_INFO "soft-resetting all blocks ...\n"); bcm6348_a1_reboot()
|
/linux-4.1.27/arch/mips/include/asm/octeon/ |
H A D | cvmx-l2c.h | 207 * a way, while a 1 bit blocks the core from evicting any 212 * @note If any ways are blocked for all cores and the HW blocks, then 214 * All cores and the hardware blocks are free to read from all 220 * Return the L2 Cache way partitioning for the hw blocks. 229 * Partitions the L2 cache for the hardware blocks. 233 * a way, while a 1 bit blocks the core from evicting any 238 * @note If any ways are blocked for all cores and the HW blocks, then 240 * All cores and the hardware blocks are free to read from all
|
/linux-4.1.27/include/uapi/asm-generic/ |
H A D | stat.h | 35 long st_blocks; /* Number 512-byte blocks allocated. */ 60 long long st_blocks; /* Number 512-byte blocks allocated. */
|
/linux-4.1.27/arch/frv/include/uapi/asm/ |
H A D | stat.h | 46 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 86 unsigned long st_blocks; /* Number 512-byte blocks allocated. */
|
/linux-4.1.27/net/wireless/ |
H A D | lib80211_crypt_ccmp.c | 221 int data_len, i, blocks, last, len; lib80211_ccmp_encrypt() local 241 blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); lib80211_ccmp_encrypt() 244 for (i = 1; i <= blocks; i++) { lib80211_ccmp_encrypt() 245 len = (i == blocks && last) ? last : AES_BLOCK_LEN; lib80211_ccmp_encrypt() 294 int i, blocks, last, len; lib80211_ccmp_decrypt() local 347 blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); lib80211_ccmp_decrypt() 350 for (i = 1; i <= blocks; i++) { lib80211_ccmp_decrypt() 351 len = (i == blocks && last) ? last : AES_BLOCK_LEN; lib80211_ccmp_decrypt()
|
/linux-4.1.27/fs/affs/ |
H A D | affs.h | 50 u32 *i_lc; /* linear cache of extended blocks */ 54 struct affs_ext_key *i_ac; /* associative cache of extended blocks */ 60 int i_pa_cnt; /* number of preallocated blocks */ 79 u32 bm_free; /* Free blocks in here */ 83 int s_partition_size; /* Partition size in blocks. */ 84 int s_reserved; /* Number of reserved blocks. */ 96 u32 s_bmap_count; /* # of bitmap blocks. */ 97 u32 s_bmap_bits; /* # of bits in one bitmap blocks */
|
/linux-4.1.27/arch/s390/include/uapi/asm/ |
H A D | vtoc.h | 50 char blkperci[4]; /* no of blocks per CI (FBA), blanks for CKD */ 84 __u8 DS4DEVDB; /* number of directory blocks per track */ 195 __u32 usable_count; /* Number of usable cylinders/blocks */ 197 * blocks */ 198 __u32 block_count; /* Disk size in CMS blocks */ 199 __u32 used_count; /* Number of CMS blocks in use */
|
/linux-4.1.27/drivers/target/ |
H A D | target_core_sbc.c | 50 u32 blocks; sbc_emulate_readcapacity() local 68 blocks = 0xffffffff; sbc_emulate_readcapacity() 70 blocks = (u32)blocks_long; sbc_emulate_readcapacity() 72 buf[0] = (blocks >> 24) & 0xff; sbc_emulate_readcapacity() 73 buf[1] = (blocks >> 16) & 0xff; sbc_emulate_readcapacity() 74 buf[2] = (blocks >> 8) & 0xff; sbc_emulate_readcapacity() 75 buf[3] = blocks & 0xff; sbc_emulate_readcapacity() 100 unsigned long long blocks = dev->transport->get_blocks(dev); sbc_emulate_readcapacity_16() local 103 buf[0] = (blocks >> 56) & 0xff; sbc_emulate_readcapacity_16() 104 buf[1] = (blocks >> 48) & 0xff; sbc_emulate_readcapacity_16() 105 buf[2] = (blocks >> 40) & 0xff; sbc_emulate_readcapacity_16() 106 buf[3] = (blocks >> 32) & 0xff; sbc_emulate_readcapacity_16() 107 buf[4] = (blocks >> 24) & 0xff; sbc_emulate_readcapacity_16() 108 buf[5] = (blocks >> 16) & 0xff; sbc_emulate_readcapacity_16() 109 buf[6] = (blocks >> 8) & 0xff; sbc_emulate_readcapacity_16() 110 buf[7] = blocks & 0xff; sbc_emulate_readcapacity_16() 197 * logical blocks shall be written. Any other value transport_get_sectors_6() 198 * specifies the number of logical blocks that shall be transport_get_sectors_6() 336 * 2) transfer logical blocks from the data-out buffer; xdreadwrite_callback() 337 * 3) XOR the logical blocks transferred from the data-out buffer with xdreadwrite_callback() 338 * the logical blocks read, storing the resulting XOR data in a buffer; xdreadwrite_callback() 340 * blocks transferred from the data-out buffer; and xdreadwrite_callback()
|
/linux-4.1.27/drivers/scsi/ |
H A D | g_NCR5380.c | 524 int blocks = len / 128; NCR5380_pread() local 532 NCR5380_write(C400_BLOCK_COUNTER_REG, blocks); NCR5380_pread() 538 printk(KERN_ERR "53C400r: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); NCR5380_pread() 554 blocks--; NCR5380_pread() 557 if (blocks) { NCR5380_pread() 574 blocks--; NCR5380_pread() 608 int blocks = len / 128; NCR5380_pwrite() local 617 NCR5380_write(C400_BLOCK_COUNTER_REG, blocks); NCR5380_pwrite() 620 printk(KERN_ERR "53C400w: Got 53C80_IRQ start=%d, blocks=%d\n", start, blocks); NCR5380_pwrite() 639 blocks--; NCR5380_pwrite() 641 if (blocks) { NCR5380_pwrite() 655 blocks--; NCR5380_pwrite()
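The 53C400 pseudo-DMA paths above move data in fixed 128-byte blocks: the block count is programmed into C400_BLOCK_COUNTER_REG and decremented as each block is transferred. A structural sketch only; the stub helpers below stand in for the driver's register writes, status polls and PIO copies and are not its API:

    #include <stdio.h>

    static void program_block_counter(int n) { printf("count=%d\n", n); }
    static void wait_host_buffer_ready(void) { }
    static void copy_block_from_chip(unsigned char *dst) { (void)dst; }

    static void pseudo_dma_read(unsigned char *dst, int len)
    {
        int blocks = len / 128;          /* hardware moves 128-byte units */

        program_block_counter(blocks);
        while (blocks) {
            wait_host_buffer_ready();
            copy_block_from_chip(dst);
            dst += 128;
            blocks--;
        }
    }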
|
/linux-4.1.27/drivers/usb/storage/ |
H A D | jumpshot.c | 476 // currently, we don't allocate any extra blocks so we're okay jumpshot_info_destructor() 487 unsigned long block, blocks; jumpshot_transport() local 542 blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); jumpshot_transport() 545 block, blocks); jumpshot_transport() 546 return jumpshot_read_data(us, info, block, blocks); jumpshot_transport() 555 blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | jumpshot_transport() 559 block, blocks); jumpshot_transport() 560 return jumpshot_read_data(us, info, block, blocks); jumpshot_transport() 567 blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); jumpshot_transport() 570 block, blocks); jumpshot_transport() 571 return jumpshot_write_data(us, info, block, blocks); jumpshot_transport() 580 blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | jumpshot_transport() 584 block, blocks); jumpshot_transport() 585 return jumpshot_write_data(us, info, block, blocks); jumpshot_transport()
|
H A D | datafab.c | 560 unsigned long block, blocks; datafab_transport() local 613 blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); datafab_transport() 616 block, blocks); datafab_transport() 617 return datafab_read_data(us, info, block, blocks); datafab_transport() 626 blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | datafab_transport() 630 block, blocks); datafab_transport() 631 return datafab_read_data(us, info, block, blocks); datafab_transport() 638 blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8])); datafab_transport() 641 block, blocks); datafab_transport() 642 return datafab_write_data(us, info, block, blocks); datafab_transport() 651 blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) | datafab_transport() 655 block, blocks); datafab_transport() 656 return datafab_write_data(us, info, block, blocks); datafab_transport()
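Both USB-storage excerpts above (jumpshot.c and datafab.c) decode the transfer length straight from the SCSI CDB: a 16-bit count in bytes 7-8 for the 10-byte commands, a 32-bit count in bytes 6-9 for the 12-byte ones. The extraction in isolation:

    #include <stdint.h>

    /* Transfer length from a 10-byte CDB (READ_10/WRITE_10): bytes 7-8. */
    static uint32_t cdb10_blocks(const uint8_t *cmnd)
    {
        return ((uint32_t)cmnd[7] << 8) | cmnd[8];
    }

    /* Transfer length from a 12-byte CDB (READ_12/WRITE_12): bytes 6-9. */
    static uint32_t cdb12_blocks(const uint8_t *cmnd)
    {
        return ((uint32_t)cmnd[6] << 24) | ((uint32_t)cmnd[7] << 16) |
               ((uint32_t)cmnd[8] << 8)  | cmnd[9];
    }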
|
/linux-4.1.27/sound/soc/intel/haswell/ |
H A D | sst-haswell-dsp.c | 79 __le32 blocks; /* # of blocks */ member in struct:fw_module_header 107 dev_dbg(dsp->dev, "new module sign 0x%s size 0x%x blocks 0x%x type 0x%x\n", hsw_parse_module() 109 module->blocks, module->type); hsw_parse_module() 126 for (count = 0; count < module->blocks; count++) { hsw_parse_module() 166 dev_err(dsp->dev, "error: could not allocate blocks for module %d\n", hsw_parse_module() 260 /* enable power gating and switch off DRAM & IRAM blocks */ hsw_set_dsp_D3() 367 /* set default power gating control, enable power gating control for all blocks. that is, hsw_set_dsp_D0() 449 int blocks; member in struct:sst_adsp_memregion 664 /* register DSP memory blocks - ideally we should get this from ACPI */ hsw_init() 667 size = (region[i].end - region[i].start) / region[i].blocks; hsw_init() 669 /* register individual memory blocks */ hsw_init() 670 for (j = 0; j < region[i].blocks; j++) { hsw_init() 679 /* set default power gating control, enable power gating control for all blocks. that is, hsw_init()
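hsw_init() sizes each DSP memory block by dividing the region evenly, size = (end - start) / blocks, then registers region[i].blocks blocks at consecutive offsets. The carve-up on its own, with a simplified region type standing in for the driver's sst_adsp_memregion:

    #include <stdio.h>
    #include <stdint.h>

    struct memregion { uint32_t start, end; int blocks; };

    static void carve_region(const struct memregion *r)
    {
        uint32_t size = (r->end - r->start) / r->blocks;
        int j;

        for (j = 0; j < r->blocks; j++)
            printf("block %d at 0x%x, %u bytes\n",
                   j, r->start + j * size, size);
    }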
|
/linux-4.1.27/fs/f2fs/ |
H A D | debug.c | 221 seq_printf(s, "Utilization: %d%% (%d valid blocks)\n", stat_show() 272 seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks, stat_show() 274 seq_printf(s, " - data blocks : %d (%d)\n", si->data_blks, stat_show() 276 seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks, stat_show() 310 seq_printf(s, "IPU: %u blocks\n", si->inplace_count); stat_show() 311 seq_printf(s, "SSR: %u blocks in %u segments\n", stat_show() 313 seq_printf(s, "LFS: %u blocks in %u segments\n", stat_show()
|
/linux-4.1.27/drivers/mmc/host/ |
H A D | cb710-mmc.c | 263 return !(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8)); cb710_is_transfer_size_supported() 269 size_t len, blocks = data->blocks; cb710_mmc_receive() local 272 /* TODO: I don't know how/if the hardware handles non-16B-boundary blocks cb710_mmc_receive() 274 if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8))) cb710_mmc_receive() 284 while (blocks-- > 0) { cb710_mmc_receive() 313 size_t len, blocks = data->blocks; cb710_mmc_send() local 317 * non-16B-boundary blocks */ cb710_mmc_send() 318 if (unlikely(data->blocks > 1 && data->blksz & 15)) cb710_mmc_send() 326 while (blocks-- > 0) { cb710_mmc_send() 440 data->bytes_xfered = data->blksz * data->blocks; cb710_mmc_transfer_data() 458 cb710_mmc_set_transfer_size(slot, data->blocks, data->blksz); cb710_mmc_command()
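cb710_is_transfer_size_supported() is a negated compound test; unfolded with De Morgan's laws it says the block size must be a multiple of 16, except that a single 8-byte block is also allowed:

    #include <stdbool.h>
    #include <stddef.h>

    /* Equivalent to !(blksz & 15 && (blocks != 1 || blksz != 8)). */
    static bool transfer_size_supported(size_t blksz, size_t blocks)
    {
        return (blksz % 16 == 0) || (blocks == 1 && blksz == 8);
    }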
|
H A D | usdhi6rol0.c | 175 /* A bounce buffer for unaligned blocks or blocks, crossing a page boundary */ 307 * have to use a bounce buffer for blocks, crossing page boundaries. Such blocks 318 data->blksz, data->blocks, sg->offset); usdhi6_blk_bounce() 343 usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks); usdhi6_sg_prep() 377 * Max blksz = 512, so blocks can only span 2 pages usdhi6_sg_map() 416 /* More blocks in this SG, don't unmap the next page */ usdhi6_sg_unmap() 466 /* More blocks in this page */ usdhi6_sg_advance() 627 __func__, data->sg_len, data->blocks, data->blksz); usdhi6_dma_kill() 902 data ? host->offset : 0, data ? data->blocks : 0, usdhi6_request_done() 910 __func__, mrq->cmd->opcode, data ? data->blocks : 0, usdhi6_request_done() 944 mrq->data->blocks > 1)) { usdhi6_cmd_flags() 1005 if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) { usdhi6_rq_start() 1026 dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n", usdhi6_rq_start() 1027 __func__, data->blocks, data->blksz); usdhi6_rq_start() 1034 data->blocks > 1)) usdhi6_rq_start() 1040 data->blocks > 1) && usdhi6_rq_start() 1045 data->blksz, data->blocks, data->sg->offset); usdhi6_rq_start() 1056 "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n", usdhi6_rq_start() 1057 __func__, cmd->opcode, data->blocks, data->blksz, usdhi6_rq_start() 1074 data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0); usdhi6_rq_start() 1341 mrq->data->blocks > 1)) usdhi6_end_cmd() 1348 mrq->data->blocks > 1)) usdhi6_end_cmd() 1514 data->bytes_xfered = data->blocks * data->blksz; usdhi6_sd_bh() 1671 host->offset, data->blocks, data->blksz, data->sg_len, usdhi6_timeout_work()
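The bounce-buffer comments above rest on one invariant: with blksz capped at 512, a block can straddle at most one page boundary, so a single bounce buffer always suffices. The straddle test itself (4 KiB pages assumed for the sketch):

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096u   /* assumed for the sketch */

    /* True when a block starting at 'offset' spills into the next page. */
    static bool block_crosses_page(size_t offset, size_t blksz)
    {
        return (offset % PAGE_SIZE) + blksz > PAGE_SIZE;
    }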
|
/linux-4.1.27/arch/arm/mach-ixp4xx/ |
H A D | ixp4xx_npe.c | 523 struct dl_block blocks[0]; npe_load_firmware() member in union:dl_image::__anon222 533 int i, j, err, data_size, instr_size, blocks, table_end; npe_load_firmware() local 604 for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size; npe_load_firmware() 605 blocks++) npe_load_firmware() 606 if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF) npe_load_firmware() 608 if (blocks * sizeof(struct dl_block) / 4 >= image->size) { npe_load_firmware() 615 print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks); npe_load_firmware() 618 table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */; npe_load_firmware() 619 for (i = 0, blk = image->blocks; i < blocks; i++, blk++) { npe_load_firmware()
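npe_load_firmware() walks the download-block table until it finds an FW_BLOCK_TYPE_EOF entry, bounding the scan by the image size (counted in 32-bit words) so a corrupt image cannot run it off the end. The walk in isolation, with a simplified block type and an assumed EOF marker value:

    #include <stdint.h>

    #define FW_BLOCK_TYPE_EOF 0x0f          /* assumed marker value */

    struct dl_block { uint32_t type, offset; };

    /* Returns the block count, or -1 if no EOF marker within the image. */
    static int count_fw_blocks(const struct dl_block *tbl, uint32_t image_words)
    {
        int blocks;

        for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image_words;
             blocks++)
            if (tbl[blocks].type == FW_BLOCK_TYPE_EOF)
                return blocks;
        return -1;
    }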
|
/linux-4.1.27/drivers/md/ |
H A D | dm-cache-metadata.h | 64 * The metadata needs to know how many cache blocks there are. We don't 66 * origin blocks to map to. 132 * Query method. Are all the blocks in the cache clean?
|
H A D | dm-bio-prison.h | 27 * Keys define a range of blocks within either a virtual or physical 107 * We use the deferred set to keep track of pending reads to shared blocks.
|
H A D | dm-bufio.h | 66 * Prefetch the specified blocks to the cache. 67 * The function starts to read the blocks and returns without waiting for
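The dm-bufio comment is worth underlining: prefetch only starts the reads and returns, so it is a cheap readahead hint rather than a synchronous fill. A usage fragment (kernel context; assumes the 4.1-era signature taking a client, a starting block and a count):

    /* Hint that the next 16 metadata blocks will be read soon; returns
     * without waiting, and a later dm_bufio_read() finds them cached. */
    dm_bufio_prefetch(client, first_block, 16);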
|
H A D | bitmap.c | 586 /* to 4k blocks */ bitmap_read_sb() 1149 /* We don't actually write all bitmap blocks here, bitmap_write_all() 1186 sector_t offset, sector_t *blocks, 1199 sector_t blocks; bitmap_daemon_work() local 1267 &blocks, 0); bitmap_daemon_work() 1292 * We mustn't write any other blocks before the superblock. bitmap_daemon_work() 1316 sector_t offset, sector_t *blocks, 1339 *blocks = csize - (offset & (csize - 1)); 1374 sector_t blocks; bitmap_startwrite() local 1378 bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1); bitmap_startwrite() 1411 offset += blocks; bitmap_startwrite() 1412 if (sectors > blocks) bitmap_startwrite() 1413 sectors -= blocks; bitmap_startwrite() 1435 sector_t blocks; bitmap_endwrite() local 1440 bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0); bitmap_endwrite() 1465 offset += blocks; bitmap_endwrite() 1466 if (sectors > blocks) bitmap_endwrite() 1467 sectors -= blocks; bitmap_endwrite() 1474 static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, __bitmap_start_sync() argument 1480 *blocks = 1024; __bitmap_start_sync() 1484 bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0); __bitmap_start_sync() 1502 int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, bitmap_start_sync() argument 1509 * At least PAGE_SIZE>>9 blocks are covered. bitmap_start_sync() 1515 *blocks = 0; bitmap_start_sync() 1516 while (*blocks < (PAGE_SIZE>>9)) { bitmap_start_sync() 1520 *blocks += blocks1; bitmap_start_sync() 1526 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) bitmap_end_sync() argument 1532 *blocks = 1024; bitmap_end_sync() 1536 bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0); bitmap_end_sync() 1564 sector_t blocks; bitmap_close_sync() local 1568 bitmap_end_sync(bitmap, sector, &blocks, 0); bitmap_close_sync() 1569 sector += blocks; bitmap_close_sync() 1577 sector_t blocks; bitmap_cond_end_sync() local 1596 bitmap_end_sync(bitmap, s, &blocks, 0); bitmap_cond_end_sync() 1597 s += blocks; bitmap_cond_end_sync() 1735 sector_t blocks = mddev->resync_max_sectors; bitmap_create() local 1796 err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1); bitmap_create() 1829 sector_t blocks; bitmap_load() local 1830 bitmap_start_sync(bitmap, sector, &blocks, 0); bitmap_load() 1831 sector += blocks; bitmap_load() 1945 int bitmap_resize(struct bitmap *bitmap, sector_t blocks, bitmap_resize() argument 1990 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); bitmap_resize() 1998 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); bitmap_resize() 2039 blocks = min(old_counts.chunks << old_counts.chunkshift, bitmap_resize() 2043 for (block = 0; block < blocks; ) { bitmap_resize()
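bitmap_get_counter() reports, through *blocks, how many sectors remain in the chunk containing 'offset'; because the chunk size is a power of two, this is csize - (offset & (csize - 1)). A worked instance:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long csize = 1024;   /* sectors per chunk, power of two */
        unsigned long long offset = 2500;  /* sector being queried */
        unsigned long long blocks = csize - (offset & (csize - 1));

        printf("%llu sectors to chunk end\n", blocks);   /* prints 572 */
        return 0;
    }

Callers such as bitmap_startwrite() then advance with offset += blocks, sectors -= blocks until the whole range is covered.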
|
/linux-4.1.27/drivers/misc/ |
H A D | hpilo.h | 16 /* max number of open channel control blocks per device, hw limited to 32 */ 18 /* min number of open channel control blocks per device, hw limited to 32 */ 41 /* shared memory on device used for channel control blocks */
|
/linux-4.1.27/drivers/net/wireless/ti/wl18xx/ |
H A D | wl18xx.h | 57 /* number of keys requiring extra spare mem-blocks */ 122 /* Cumulative counter of released Voice memory blocks */ 154 /* Cumulative counter of total released mem blocks since FW-reset */
|
/linux-4.1.27/arch/powerpc/crypto/ |
H A D | aes-spe-modes.S | 50 #define CBC_DEC 32 /* 2 blocks because of incs */ 221 * called from glue layer to encrypt multiple blocks via ECB 222 * Bytes must be larger or equal 16 and only whole blocks are 256 * called from glue layer to decrypt multiple blocks via ECB 257 * Bytes must be larger or equal 16 and only whole blocks are 292 * called from glue layer to encrypt multiple blocks via CBC 293 * Bytes must be larger or equal 16 and only whole blocks are 340 * called from glue layer to decrypt multiple blocks via CBC 415 * called from glue layer to encrypt/decrypt multiple blocks 499 * called from glue layer to encrypt multiple blocks via XTS 564 * u32 rounds, u32 blocks, u8 *iv, u32 *key_twk); 566 * called from glue layer to decrypt multiple blocks via XTS
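The recurring comment, "Bytes must be larger or equal 16 and only whole blocks are" handled, states the contract these SPE routines expect their glue layer to enforce: at least one 16-byte block and no partial blocks. The check a caller would perform:

    #include <stdbool.h>
    #include <stddef.h>

    /* At least one 16-byte AES block, and nothing but whole blocks. */
    static bool spe_len_ok(size_t bytes)
    {
        return bytes >= 16 && (bytes & 15) == 0;
    }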
|
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | rtas_flash.c | 114 struct flash_block blocks[FLASH_BLOCKS_PER_NODE]; member in struct:flash_block_list 177 if (f->blocks[i].data == NULL) { flash_list_valid() 180 block_size = f->blocks[i].length; flash_list_valid() 205 kmem_cache_free(flash_block_cache, f->blocks[i].data); free_flash_list() 361 fl->blocks[next_free].data = p; rtas_flash_write() 362 fl->blocks[next_free].length = count; rtas_flash_write() 593 * an entry with no data blocks in the reserved buffer in rtas_flash_firmware() 614 f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data)); rtas_flash_firmware() 615 image_size += f->blocks[i].length; rtas_flash_firmware() 616 f->blocks[i].length = cpu_to_be64(f->blocks[i].length); rtas_flash_firmware()
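rtas_flash_firmware() fixes up each block-list entry in place before handing it to firmware: data pointers become big-endian physical addresses, lengths become big-endian, and image_size accumulates the total. A simplified host-side sketch; the byte swap stands in for cpu_to_be64(), and the __pa() physical-address translation is omitted:

    #include <stdint.h>

    struct flash_block { uint64_t data; uint64_t length; };

    static uint64_t be64(uint64_t x)      /* stand-in for cpu_to_be64() */
    {
        return __builtin_bswap64(x);      /* assumes a little-endian host */
    }

    static uint64_t prepare_blocks(struct flash_block *blk, int count)
    {
        uint64_t image_size = 0;
        int i;

        for (i = 0; i < count; i++) {
            image_size += blk[i].length;
            blk[i].data = be64(blk[i].data);    /* really be64(__pa(p)) */
            blk[i].length = be64(blk[i].length);
        }
        return image_size;
    }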
|
/linux-4.1.27/fs/afs/ |
H A D | afs.h | 161 u32 min_quota; /* minimum space set aside (blocks) */ 162 u32 max_quota; /* maximum space this volume may occupy (blocks) */ 163 u32 blocks_in_use; /* space this volume currently occupies (blocks) */
|
/linux-4.1.27/drivers/gpio/ |
H A D | gpio-sch311x.c | 52 struct sch311x_gpio_block blocks[6]; member in struct:sch311x_gpio_priv 250 for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) { sch311x_gpio_probe() 251 block = &priv->blocks[i]; sch311x_gpio_probe() 286 gpiochip_remove(&priv->blocks[i].chip); sch311x_gpio_probe() 298 for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) { sch311x_gpio_remove() 299 gpiochip_remove(&priv->blocks[i].chip); sch311x_gpio_remove()
|
/linux-4.1.27/drivers/staging/gdm72xx/ |
H A D | gdm_sdio.c | 215 int n, blocks, ret, remain; send_sdio_pkt() local 219 blocks = len / func->cur_blksize; send_sdio_pkt() 220 n = blocks * func->cur_blksize; send_sdio_pkt() 221 if (blocks) { send_sdio_pkt() 481 u32 len, blocks, n; gdm_sdio_irq() local 525 blocks = remain / func->cur_blksize; gdm_sdio_irq() 527 if (blocks) { gdm_sdio_irq() 528 n = blocks * func->cur_blksize; gdm_sdio_irq()
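send_sdio_pkt() splits a transfer into whole cur_blksize blocks sent in block mode, with the remainder sent separately. The split on its own:

    #include <stdio.h>

    int main(void)
    {
        int len = 1000, blksize = 512;   /* example transfer */
        int blocks = len / blksize;      /* whole blocks: 1 */
        int n = blocks * blksize;        /* block-mode bytes: 512 */
        int remain = len - n;            /* trailing bytes: 488 */

        printf("%d blocks, %d bytes, %d remain\n", blocks, n, remain);
        return 0;
    }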
|
/linux-4.1.27/drivers/net/wireless/ti/wl12xx/ |
H A D | wl12xx.h | 120 /* Cumulative counter of released Voice memory blocks */ 152 /* Cumulative counter of total released mem blocks since FW-reset */
|
/linux-4.1.27/drivers/phy/ |
H A D | phy-rockchip-usb.c | 58 /* Power down usb phy analog blocks by set siddq 1 */ rockchip_usb_phy_power_off() 77 /* Power up usb phy analog blocks by set siddq 0 */ rockchip_usb_phy_power_on()
|
/linux-4.1.27/arch/x86/include/uapi/asm/ |
H A D | stat.h | 59 /* Number 512-byte blocks allocated. */ 94 __kernel_long_t st_blocks; /* Number 512-byte blocks allocated. */
|
/linux-4.1.27/arch/x86/lib/ |
H A D | csum-partial_64.c | 67 /* main loop using 64byte blocks */ do_csum() 87 /* last up to 7 8byte blocks */ do_csum()
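do_csum() drains the buffer in two tiers: a main loop over 64-byte blocks, then up to seven 8-byte blocks. A structural sketch of that loop nest only; the real routine folds ones-complement carries with adc, which the plain C addition below does not do:

    #include <stddef.h>
    #include <stdint.h>

    static uint64_t sum_blocks(const uint64_t *p, size_t len)
    {
        uint64_t sum = 0;

        while (len >= 64) {              /* main loop: 64-byte blocks */
            int i;
            for (i = 0; i < 8; i++)
                sum += p[i];             /* carry folding omitted */
            p += 8;
            len -= 64;
        }
        while (len >= 8) {               /* last up to 7 8-byte blocks */
            sum += *p++;
            len -= 8;
        }
        return sum;
    }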
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | backoff.h | 24 * condition code register. Each read blocks the strand for something 35 * chip blocks for 1 cycle.
|
/linux-4.1.27/arch/sh/include/uapi/asm/ |
H A D | stat.h | 62 unsigned long st_blocks; /* Number 512-byte blocks allocated. */ 121 unsigned long long st_blocks; /* Number 512-byte blocks allocated. */
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | tsi108.h | 24 /* Sizes of register spaces for individual blocks */ 38 /* Offsets within Tsi108(A) CSR space for individual blocks */
|
/linux-4.1.27/arch/arm/mach-s3c64xx/include/mach/ |
H A D | pm-core.h | 72 /* S3C64XX UART blocks only support level interrupts, so ensure that s3c_pm_arch_update_uart() 73 * when we restore unused UART blocks we force the level interrupt s3c_pm_arch_update_uart()
|
/linux-4.1.27/lib/raid6/ |
H A D | recov.c | 24 /* Recover two failed data blocks. */ raid6_2data_recov_intx1() 112 /* Recover two failed blocks. */ raid6_dual_recov()
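raid6_2data_recov_intx1() implements the standard RAID6 algebra: with data disks a < b failed, the P and Q difference syndromes give two equations, da ^ db = pd and g^a*da ^ g^b*db = qd, solvable with a handful of GF(256) multiplies per byte. The kernel uses precomputed lookup tables; the sketch below trades them for slow but self-contained field helpers to show the math:

    #include <stdint.h>

    static uint8_t gf_mul(uint8_t a, uint8_t b)   /* GF(256), poly 0x11d */
    {
        uint8_t r = 0;
        while (b) {
            if (b & 1)
                r ^= a;
            a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
            b >>= 1;
        }
        return r;
    }

    static uint8_t gf_pow(uint8_t a, int n)
    {
        uint8_t r = 1;
        while (n--)
            r = gf_mul(r, a);
        return r;
    }

    static uint8_t gf_inv(uint8_t a)              /* a^254 == a^-1 */
    {
        return gf_pow(a, 254);
    }

    /* Solve da ^ db = pd, g^a*da ^ g^b*db = qd for one byte position. */
    static void recover2(int a, int b, uint8_t pd, uint8_t qd,
                         uint8_t *da, uint8_t *db)
    {
        uint8_t ga = gf_pow(2, a), gb = gf_pow(2, b);
        uint8_t denom = gf_inv(ga ^ gb);

        *da = gf_mul(denom, gf_mul(gb, pd) ^ qd);
        *db = pd ^ *da;
    }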
|