
Searched refs:PAGE_CACHE_SIZE (Results 1 – 200 of 271) sorted by relevance

/linux-4.4.14/net/ceph/
Dpagevec.c103 l = min_t(int, PAGE_CACHE_SIZE-po, left); in ceph_copy_user_to_page_vector()
110 if (po == PAGE_CACHE_SIZE) { in ceph_copy_user_to_page_vector()
128 size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); in ceph_copy_to_page_vector()
134 if (po == PAGE_CACHE_SIZE) { in ceph_copy_to_page_vector()
151 size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); in ceph_copy_from_page_vector()
157 if (po == PAGE_CACHE_SIZE) { in ceph_copy_from_page_vector()
179 int end = min((int)PAGE_CACHE_SIZE, off + len); in ceph_zero_page_vector_range()
186 while (len >= PAGE_CACHE_SIZE) { in ceph_zero_page_vector_range()
188 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); in ceph_zero_page_vector_range()
189 len -= PAGE_CACHE_SIZE; in ceph_zero_page_vector_range()
/linux-4.4.14/fs/btrfs/
Dzlib.c62 workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); in zlib_alloc_workspace()
120 workspace->strm.avail_out = PAGE_CACHE_SIZE; in zlib_compress_pages()
121 workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE); in zlib_compress_pages()
159 workspace->strm.avail_out = PAGE_CACHE_SIZE; in zlib_compress_pages()
175 start += PAGE_CACHE_SIZE; in zlib_compress_pages()
180 PAGE_CACHE_SIZE); in zlib_compress_pages()
226 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); in zlib_decompress_biovec()
232 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); in zlib_decompress_biovec()
237 workspace->strm.avail_out = PAGE_CACHE_SIZE; in zlib_decompress_biovec()
277 workspace->strm.avail_out = PAGE_CACHE_SIZE; in zlib_decompress_biovec()
[all …]
Dcompression.c122 csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE); in check_compressed_csum()
344 WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); in btrfs_submit_compressed_write()
377 PAGE_CACHE_SIZE, in btrfs_submit_compressed_write()
383 if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < in btrfs_submit_compressed_write()
384 PAGE_CACHE_SIZE) { in btrfs_submit_compressed_write()
413 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); in btrfs_submit_compressed_write()
415 if (bytes_left < PAGE_CACHE_SIZE) { in btrfs_submit_compressed_write()
420 bytes_left -= PAGE_CACHE_SIZE; in btrfs_submit_compressed_write()
421 first_byte += PAGE_CACHE_SIZE; in btrfs_submit_compressed_write()
460 last_offset = (page_offset(page) + PAGE_CACHE_SIZE); in add_ra_bio_pages()
[all …]
Dlzo.c58 workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); in lzo_alloc_workspace()
59 workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); in lzo_alloc_workspace()
136 pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; in lzo_compress_pages()
139 in_len = min(len, PAGE_CACHE_SIZE); in lzo_compress_pages()
204 pg_bytes_left = PAGE_CACHE_SIZE; in lzo_compress_pages()
226 start += PAGE_CACHE_SIZE; in lzo_compress_pages()
229 in_len = min(bytes_left, PAGE_CACHE_SIZE); in lzo_compress_pages()
269 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); in lzo_decompress_biovec()
292 in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; in lzo_decompress_biovec()
348 in_page_bytes_left = PAGE_CACHE_SIZE; in lzo_decompress_biovec()
[all …]
Dextent_io.c1792 max_bytes = PAGE_CACHE_SIZE; in find_lock_delalloc_range()
2056 u64 end = start + PAGE_CACHE_SIZE - 1; in check_page_uptodate()
2166 PAGE_CACHE_SIZE, start, p, in repair_eb_io_failure()
2170 start += PAGE_CACHE_SIZE; in repair_eb_io_failure()
2567 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { in end_bio_extent_writepage()
2568 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) in end_bio_extent_writepage()
2644 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { in end_bio_extent_readpage()
2645 if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) in end_bio_extent_readpage()
2705 off = i_size & (PAGE_CACHE_SIZE-1); in end_bio_extent_readpage()
2707 zero_user_segment(page, off, PAGE_CACHE_SIZE); in end_bio_extent_readpage()
[all …]
Dinode-map.c286 #define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
320 PAGE_CACHE_SIZE / sizeof(*info); in recalculate_thresholds()
484 prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); in btrfs_save_ino_cache()
485 prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; in btrfs_save_ino_cache()
489 prealloc += 8 * PAGE_CACHE_SIZE; in btrfs_save_ino_cache()
Dstruct-funcs.c69 (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
107 (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
Dcheck-integrity.c770 if (ret < (int)PAGE_CACHE_SIZE) { in btrfsic_process_superblock()
1244 size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1); in btrfsic_read_from_block_data()
1248 offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1); in btrfsic_read_from_block_data()
1251 cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page)); in btrfsic_read_from_block_data()
1252 BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE)); in btrfsic_read_from_block_data()
1618 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> in btrfsic_release_block_ctx()
1650 if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) { in btrfsic_read_block()
1657 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> in btrfsic_read_block()
1689 PAGE_CACHE_SIZE, 0); in btrfsic_read_block()
1690 if (PAGE_CACHE_SIZE != ret) in btrfsic_read_block()
[all …]
Draid56.c273 memcpy(d, s, PAGE_CACHE_SIZE); in cache_rbio_pages()
953 return DIV_ROUND_UP(nr, PAGE_CACHE_SIZE); in rbio_nr_pages()
1088 ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); in rbio_add_io_page()
1089 if (ret == PAGE_CACHE_SIZE) in rbio_add_io_page()
1103 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); in rbio_add_io_page()
1259 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); in finish_rmw()
1511 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); in raid56_rmw_stripe()
1806 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); in __raid_recover_end_io()
1922 PAGE_CACHE_SIZE); in __raid_recover_end_io()
1931 run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); in __raid_recover_end_io()
[all …]
Dfile.c417 int offset = pos & (PAGE_CACHE_SIZE - 1); in btrfs_copy_from_user()
421 PAGE_CACHE_SIZE - offset, write_bytes); in btrfs_copy_from_user()
451 if (copied < PAGE_CACHE_SIZE - offset) { in btrfs_copy_from_user()
1300 if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && in prepare_uptodate_page()
1391 start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1); in lock_and_cleanup_extent_if_need()
1497 nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE), in __btrfs_buffered_write()
1498 PAGE_CACHE_SIZE / (sizeof(struct page *))); in __btrfs_buffered_write()
1506 size_t offset = pos & (PAGE_CACHE_SIZE - 1); in __btrfs_buffered_write()
1508 nrptrs * (size_t)PAGE_CACHE_SIZE - in __btrfs_buffered_write()
1511 PAGE_CACHE_SIZE); in __btrfs_buffered_write()
[all …]
Dinode.c191 PAGE_CACHE_SIZE); in insert_inline_extent()
208 offset = start & (PAGE_CACHE_SIZE - 1); in insert_inline_extent()
260 actual_end > PAGE_CACHE_SIZE || in cow_file_range_inline()
319 btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE); in cow_file_range_inline()
433 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); in compress_file_range()
511 (PAGE_CACHE_SIZE - 1); in compress_file_range()
521 PAGE_CACHE_SIZE - offset); in compress_file_range()
577 total_in = ALIGN(total_in, PAGE_CACHE_SIZE); in compress_file_range()
963 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; in cow_file_range()
1103 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> in async_cow_submit()
[all …]
Dfree-space-cache.c32 #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
313 num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); in io_ctl_init()
320 (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) in io_ctl_init()
357 io_ctl->size = PAGE_CACHE_SIZE; in io_ctl_map_page()
359 memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); in io_ctl_map_page()
478 PAGE_CACHE_SIZE - offset); in io_ctl_set_crc()
506 PAGE_CACHE_SIZE - offset); in io_ctl_check_crc()
564 memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); in io_ctl_add_bitmap()
624 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); in io_ctl_read_bitmap()
778 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); in __load_free_space_cache()
[all …]
Dextent_io.h124 #define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
302 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - in num_extent_pages()
Dioctl.c894 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); in check_defrag_in_cache()
984 u64 len = PAGE_CACHE_SIZE; in defrag_lookup_extent()
1144 page_end = page_start + PAGE_CACHE_SIZE - 1; in cluster_pages_for_defrag()
1204 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; in cluster_pages_for_defrag()
1344 (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) { in btrfs_defrag_file()
1367 next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE); in btrfs_defrag_file()
1719 if (vol_args->size > PAGE_CACHE_SIZE) { in btrfs_ioctl_snap_create_v2()
2984 unsigned int cmp_len = PAGE_CACHE_SIZE; in btrfs_cmp_data()
2989 if (len < PAGE_CACHE_SIZE) in btrfs_cmp_data()
3222 if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) { in btrfs_ioctl_file_extent_same()
[all …]
Dscrub.c1641 memset(mapped_buffer, 0, PAGE_CACHE_SIZE); in scrub_write_page_to_dev_replace()
4297 while (len >= PAGE_CACHE_SIZE) { in copy_nocow_pages_for_inode()
4356 offset += PAGE_CACHE_SIZE; in copy_nocow_pages_for_inode()
4357 physical_for_dev_replace += PAGE_CACHE_SIZE; in copy_nocow_pages_for_inode()
4358 nocow_ctx_logical += PAGE_CACHE_SIZE; in copy_nocow_pages_for_inode()
4359 len -= PAGE_CACHE_SIZE; in copy_nocow_pages_for_inode()
4393 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); in write_page_nocow()
4394 if (ret != PAGE_CACHE_SIZE) { in write_page_nocow()
Dfile-item.c34 PAGE_CACHE_SIZE))
204 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) in __btrfs_lookup_bio_sums()
/linux-4.4.14/fs/ecryptfs/
Dmmap.c126 loff_t num_extents_per_page = (PAGE_CACHE_SIZE in ecryptfs_copy_up_encrypted_with_header()
142 memset(page_virt, 0, PAGE_CACHE_SIZE); in ecryptfs_copy_up_encrypted_with_header()
202 PAGE_CACHE_SIZE, in ecryptfs_readpage()
219 page, page->index, 0, PAGE_CACHE_SIZE, in ecryptfs_readpage()
254 if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) in fill_zeros_to_end_of_page()
256 end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; in fill_zeros_to_end_of_page()
259 zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE); in fill_zeros_to_end_of_page()
300 page, index, 0, PAGE_CACHE_SIZE, mapping->host); in ecryptfs_write_begin()
326 page, index, 0, PAGE_CACHE_SIZE, in ecryptfs_write_begin()
340 zero_user(page, 0, PAGE_CACHE_SIZE); in ecryptfs_write_begin()
[all …]
Dread_write.c128 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); in ecryptfs_write()
168 PAGE_CACHE_SIZE - start_offset_in_page); in ecryptfs_write()
Dcrypto.c296 remainder_of_page = PAGE_CACHE_SIZE - offset; in virt_to_scatterlist()
438 extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size)); in crypt_extent()
508 extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); in ecryptfs_encrypt_page()
522 PAGE_CACHE_SIZE); in ecryptfs_encrypt_page()
570 rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE, in ecryptfs_decrypt_page()
581 extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); in ecryptfs_decrypt_page()
669 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) in ecryptfs_set_default_sizes()
673 crypt_stat->metadata_size = PAGE_CACHE_SIZE; in ecryptfs_set_default_sizes()
1452 memset(page_virt, 0, PAGE_CACHE_SIZE); in ecryptfs_read_metadata()
1485 memset(page_virt, 0, PAGE_CACHE_SIZE); in ecryptfs_read_metadata()
Dmain.c697 .size = PAGE_CACHE_SIZE,
702 .size = PAGE_CACHE_SIZE,
820 if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) { in ecryptfs_init()
828 (unsigned long)PAGE_CACHE_SIZE); in ecryptfs_init()
/linux-4.4.14/fs/hfsplus/
Dbnode.c30 l = min_t(int, len, PAGE_CACHE_SIZE - off); in hfs_bnode_read()
36 l = min_t(int, len, PAGE_CACHE_SIZE); in hfs_bnode_read()
83 l = min_t(int, len, PAGE_CACHE_SIZE - off); in hfs_bnode_write()
90 l = min_t(int, len, PAGE_CACHE_SIZE); in hfs_bnode_write()
113 l = min_t(int, len, PAGE_CACHE_SIZE - off); in hfs_bnode_clear()
119 l = min_t(int, len, PAGE_CACHE_SIZE); in hfs_bnode_clear()
145 l = min_t(int, len, PAGE_CACHE_SIZE - src); in hfs_bnode_copy()
152 l = min_t(int, len, PAGE_CACHE_SIZE); in hfs_bnode_copy()
164 if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) { in hfs_bnode_copy()
165 l = PAGE_CACHE_SIZE - src; in hfs_bnode_copy()
[all …]
Dbtree.c239 (tree->node_size + PAGE_CACHE_SIZE - 1) >> in hfs_btree_open()
406 if (++off >= PAGE_CACHE_SIZE) { in hfs_bmap_alloc()
Dbitmap.c16 #define PAGE_CACHE_BITS (PAGE_CACHE_SIZE * 8)
Dxattr.c223 for (; written < node_size; index++, written += PAGE_CACHE_SIZE) { in hfsplus_create_attributes_file()
234 min_t(size_t, PAGE_CACHE_SIZE, node_size - written)); in hfsplus_create_attributes_file()
/linux-4.4.14/fs/squashfs/
Dfile_direct.c99 bytes = res % PAGE_CACHE_SIZE; in squashfs_readpage_block()
102 memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); in squashfs_readpage_block()
156 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { in squashfs_read_cache()
157 int avail = min_t(int, bytes, PAGE_CACHE_SIZE); in squashfs_read_cache()
164 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); in squashfs_read_cache()
Dlzo_wrapper.c105 if (bytes <= PAGE_CACHE_SIZE) { in lzo_uncompress()
109 memcpy(data, buff, PAGE_CACHE_SIZE); in lzo_uncompress()
110 buff += PAGE_CACHE_SIZE; in lzo_uncompress()
111 bytes -= PAGE_CACHE_SIZE; in lzo_uncompress()
Dlz4_wrapper.c120 if (bytes <= PAGE_CACHE_SIZE) { in lz4_uncompress()
124 memcpy(data, buff, PAGE_CACHE_SIZE); in lz4_uncompress()
125 buff += PAGE_CACHE_SIZE; in lz4_uncompress()
126 bytes -= PAGE_CACHE_SIZE; in lz4_uncompress()
Dcache.c278 entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); in squashfs_cache_init()
317 void *buff = entry->data[offset / PAGE_CACHE_SIZE] in squashfs_copy_data()
318 + (offset % PAGE_CACHE_SIZE); in squashfs_copy_data()
320 PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); in squashfs_copy_data()
418 int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in squashfs_read_table()
439 for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) in squashfs_read_table()
Dfile.c178 __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); in read_indexes()
186 int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); in read_indexes()
390 bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { in squashfs_copy_cache()
392 int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0; in squashfs_copy_cache()
407 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); in squashfs_copy_cache()
465 if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> in squashfs_readpage()
490 memset(pageaddr, 0, PAGE_CACHE_SIZE); in squashfs_readpage()
Dsymlink.c54 int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE); in squashfs_symlink_readpage()
97 memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length); in squashfs_symlink_readpage()
Dpage_actor.c51 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init()
91 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init_special()
Dzlib_wrapper.c72 stream->avail_out = PAGE_CACHE_SIZE; in zlib_uncompress()
88 stream->avail_out = PAGE_CACHE_SIZE; in zlib_uncompress()
Dxz_wrapper.c144 stream->buf.out_size = PAGE_CACHE_SIZE; in squashfs_xz_uncompress()
161 total += PAGE_CACHE_SIZE; in squashfs_xz_uncompress()
Dblock.c184 if (pg_offset == PAGE_CACHE_SIZE) { in squashfs_read_data()
188 avail = min_t(int, in, PAGE_CACHE_SIZE - in squashfs_read_data()
Dpage_actor.h27 actor->length = length ? : pages * PAGE_CACHE_SIZE; in squashfs_page_actor_init()
Ddecompressor.c105 buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); in get_comp_opts()
Dsuper.c156 if (PAGE_CACHE_SIZE > msblk->block_size) { in squashfs_fill_super()
/linux-4.4.14/fs/cramfs/
DREADME89 PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
93 PAGE_CACHE_SIZE may grow in future (if I interpret the comment
96 Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
97 for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
102 One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
105 PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
108 PAGE_CACHE_SIZE is subject to change between kernel versions
129 PAGE_CACHE_SIZE.
132 PAGE_CACHE_SIZE.
135 PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
[all …]
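The README lines quoted above contrast how mkcramfs currently hard-codes its block size against taking it from the kernel headers. A minimal illustration of that choice (a sketch only, not the mkcramfs source the README refers to):

    /* Current behaviour per the README: a hard-coded block size, so the
     * generated image does not depend on the build host's kernel. */
    #define PAGE_CACHE_SIZE (4096)

    /* Alternative the README mentions: take the value from the kernel
     * headers instead, so blksize tracks the kernel's page cache size,
     * i.e. replace the define above with:
     *     #include <asm/page.h>
     */
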
Dinode.c154 #define BUFFER_SIZE (BLKS_PER_BUF*PAGE_CACHE_SIZE)
176 offset &= PAGE_CACHE_SIZE - 1; in cramfs_read()
231 memcpy(data, kmap(page), PAGE_CACHE_SIZE); in cramfs_read()
235 memset(data, 0, PAGE_CACHE_SIZE); in cramfs_read()
236 data += PAGE_CACHE_SIZE; in cramfs_read()
355 buf->f_bsize = PAGE_CACHE_SIZE; in cramfs_statfs()
498 maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in cramfs_readpage()
518 else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) { in cramfs_readpage()
525 PAGE_CACHE_SIZE, in cramfs_readpage()
534 memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled); in cramfs_readpage()
/linux-4.4.14/fs/logfs/
Dfile.c25 if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) in logfs_write_begin()
28 unsigned start = pos & (PAGE_CACHE_SIZE - 1); in logfs_write_begin()
32 zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); in logfs_write_begin()
44 unsigned start = pos & (PAGE_CACHE_SIZE - 1); in logfs_write_end()
48 BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize); in logfs_write_end()
145 offset = i_size & (PAGE_CACHE_SIZE-1); in logfs_writepage()
158 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in logfs_writepage()
Ddev_mtd.c49 BUG_ON(len > PAGE_CACHE_SIZE); in loffs_mtd_write()
/linux-4.4.14/fs/ceph/
Daddr.c146 if (offset != 0 || length != PAGE_CACHE_SIZE) { in ceph_invalidatepage()
200 u64 len = PAGE_CACHE_SIZE; in readpage_nounlock()
203 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in readpage_nounlock()
215 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in readpage_nounlock()
237 if (err < PAGE_CACHE_SIZE) in readpage_nounlock()
239 zero_user_segment(page, err, PAGE_CACHE_SIZE); in readpage_nounlock()
281 if (bytes < (int)PAGE_CACHE_SIZE) { in finish_read()
284 zero_user_segment(page, s, PAGE_CACHE_SIZE); in finish_read()
294 bytes -= PAGE_CACHE_SIZE; in finish_read()
418 if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE) in ceph_readpages()
[all …]
Dfile.c694 (pos+n) | (PAGE_CACHE_SIZE-1)); in ceph_sync_direct_write()
798 num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in ceph_sync_write()
944 iocb->ki_pos < PAGE_CACHE_SIZE) { in ceph_read_iter()
947 end = min_t(loff_t, end, PAGE_CACHE_SIZE); in ceph_read_iter()
1223 zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size); in ceph_zero_partial_page()
1232 loff_t nearly = round_up(offset, PAGE_CACHE_SIZE); in ceph_zero_pagecache_range()
1241 if (length >= PAGE_CACHE_SIZE) { in ceph_zero_pagecache_range()
1242 loff_t size = round_down(length, PAGE_CACHE_SIZE); in ceph_zero_pagecache_range()
Dsuper.c918 if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE) in ceph_register_bdi()
920 (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1) in ceph_register_bdi()
924 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; in ceph_register_bdi()
Dmds_client.h99 #define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \
/linux-4.4.14/fs/isofs/
Dcompress.c29 static char zisofs_sink_page[PAGE_CACHE_SIZE];
73 memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE); in zisofs_uncompress_block()
124 stream.avail_out = PAGE_CACHE_SIZE - poffset; in zisofs_uncompress_block()
128 stream.avail_out = PAGE_CACHE_SIZE; in zisofs_uncompress_block()
223 end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size); in zisofs_fill_pages()
285 PAGE_CACHE_SIZE - poffset); in zisofs_fill_pages()
310 end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in zisofs_readpage()
/linux-4.4.14/mm/
Dtruncate.c112 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); in truncate_complete_page()
154 PAGE_CACHE_SIZE, 0); in truncate_inode_page()
235 partial_start = lstart & (PAGE_CACHE_SIZE - 1); in truncate_inode_pages_range()
236 partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); in truncate_inode_pages_range()
244 start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in truncate_inode_pages_range()
292 unsigned int top = PAGE_CACHE_SIZE; in truncate_inode_pages_range()
617 PAGE_CACHE_SIZE, 0); in invalidate_inode_pages2_range()
742 if (from >= to || bsize == PAGE_CACHE_SIZE) in pagecache_isize_extended()
746 if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1))) in pagecache_isize_extended()
Dreadahead.c51 do_invalidatepage(page, 0, PAGE_CACHE_SIZE); in read_cache_pages_invalidate_page()
104 task_io_account_read(PAGE_CACHE_SIZE); in read_cache_pages()
220 unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE; in force_page_cache_readahead()
Dshmem.c78 #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
179 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0; in shmem_acct_block()
185 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); in shmem_unacct_blocks()
401 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in shmem_undo_range()
403 unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1); in shmem_undo_range()
404 unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); in shmem_undo_range()
457 unsigned int top = PAGE_CACHE_SIZE; in shmem_undo_range()
1521 if (copied < PAGE_CACHE_SIZE) { in shmem_write_end()
1522 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in shmem_write_end()
1524 from + copied, PAGE_CACHE_SIZE); in shmem_write_end()
[all …]
Dfilemap.c1543 prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1); in do_generic_file_read()
1544 last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; in do_generic_file_read()
1601 nr = PAGE_CACHE_SIZE; in do_generic_file_read()
1943 size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); in filemap_fault()
1993 size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); in filemap_fault()
2098 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); in filemap_map_pages()
2510 offset = (pos & (PAGE_CACHE_SIZE - 1)); in generic_perform_write()
2511 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, in generic_perform_write()
2564 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, in generic_perform_write()
Dfadvise.c127 start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; in SYSCALL_DEFINE4()
/linux-4.4.14/drivers/staging/lustre/lnet/selftest/
Dbrw_test.c91 len = npg * PAGE_CACHE_SIZE; in brw_client_init()
103 npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in brw_client_init()
166 addr += PAGE_CACHE_SIZE - BRW_MSIZE; in brw_fill_page()
172 for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) in brw_fill_page()
197 addr += PAGE_CACHE_SIZE - BRW_MSIZE; in brw_check_page()
206 for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { in brw_check_page()
277 len = npg * PAGE_CACHE_SIZE; in brw_client_prep_rpc()
289 npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in brw_client_prep_rpc()
466 npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in brw_server_handle()
Dconrpc.c791 brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / in lstcon_bulkrpc_v0_prep()
792 PAGE_CACHE_SIZE; in lstcon_bulkrpc_v0_prep()
827 npg * PAGE_CACHE_SIZE : in lstcon_testrpc_prep()
856 PAGE_CACHE_SIZE : in lstcon_testrpc_prep()
857 min_t(int, nob, PAGE_CACHE_SIZE); in lstcon_testrpc_prep()
Dconctl.c755 args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) in lst_test_add_ioctl()
825 if (data->ioc_plen1 > PAGE_CACHE_SIZE) in lstcon_ioctl_entry()
Dselftest.h396 #define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
/linux-4.4.14/fs/nfs/
Dread.c49 zero_user(page, 0, PAGE_CACHE_SIZE); in nfs_return_empty_page()
104 if (len < PAGE_CACHE_SIZE) in nfs_readpage_async()
105 zero_user_segment(page, len, PAGE_CACHE_SIZE); in nfs_readpage_async()
295 page, PAGE_CACHE_SIZE, page_file_index(page)); in nfs_readpage()
361 if (len < PAGE_CACHE_SIZE) in readpage_async_filler()
362 zero_user_segment(page, len, PAGE_CACHE_SIZE); in readpage_async_filler()
422 npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >> in nfs_readpages()
Dfile.c321 unsigned int offset = pos & (PAGE_CACHE_SIZE - 1); in nfs_want_read_modify_write()
397 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); in nfs_write_end()
414 end, PAGE_CACHE_SIZE); in nfs_write_end()
417 zero_user_segment(page, end, PAGE_CACHE_SIZE); in nfs_write_end()
421 zero_user_segment(page, pglen, PAGE_CACHE_SIZE); in nfs_write_end()
455 if (offset != 0 || length < PAGE_CACHE_SIZE) in nfs_invalidate_page()
Dclient.c739 server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in nfs_server_set_fsinfo()
748 server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in nfs_server_set_fsinfo()
753 if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES) in nfs_server_set_fsinfo()
754 server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES; in nfs_server_set_fsinfo()
Dinternal.h647 return PAGE_CACHE_SIZE; in nfs_page_length()
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
Drw26.c90 if (offset == 0 && length == PAGE_CACHE_SIZE) { in ll_invalidatepage()
200 *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in ll_get_user_pages()
360 #define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
435 size > (PAGE_CACHE_SIZE / sizeof(*pages)) * in ll_direct_IO_26()
436 PAGE_CACHE_SIZE) { in ll_direct_IO_26()
480 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in ll_write_begin()
500 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in ll_write_end()
Dlloop.c222 BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); in do_bio_lustrebacked()
511 lo->lo_blocksize = PAGE_CACHE_SIZE; in loop_set_fd()
529 CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8))); in loop_set_fd()
531 (unsigned short)PAGE_CACHE_SIZE); in loop_set_fd()
Drw.c151 result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE); in ll_cl_init()
982 kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> in ras_update()
1164 offset + PAGE_CACHE_SIZE - 1, in ll_writepage()
Dvvp_io.c515 bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); in vvp_io_read_start()
954 if (from == 0 && to == PAGE_CACHE_SIZE) { in vvp_io_prepare_write()
1035 to = PAGE_CACHE_SIZE; in vvp_io_commit_write()
Dllite_mmap.c458 unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, in ll_teardown_mmaps()
Dvvp_page.c172 ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE); in vvp_page_unmap()
Dllite_lib.c274 buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); in client_common_fill_super()
279 obd_connect_flags2str(buf, PAGE_CACHE_SIZE, in client_common_fill_super()
341 sbi->ll_md_brw_size = PAGE_CACHE_SIZE; in client_common_fill_super()
/linux-4.4.14/fs/gfs2/
Daops.c112 offset = i_size & (PAGE_CACHE_SIZE-1); in gfs2_writepage_common()
114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in gfs2_writepage_common()
241 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); in gfs2_write_jdata_pagevec()
461 zero_user(page, 0, PAGE_CACHE_SIZE); in stuffed_readpage()
474 memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); in stuffed_readpage()
563 unsigned long index = *pos / PAGE_CACHE_SIZE; in gfs2_internal_read()
564 unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); in gfs2_internal_read()
572 if (offset + size > PAGE_CACHE_SIZE) in gfs2_internal_read()
573 amt = PAGE_CACHE_SIZE - offset; in gfs2_internal_read()
655 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in gfs2_write_begin()
[all …]
Dfile.c357 unsigned long size = PAGE_CACHE_SIZE; in gfs2_allocate_page_backing()
408 gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); in gfs2_page_mkwrite()
418 if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { in gfs2_page_mkwrite()
431 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); in gfs2_page_mkwrite()
882 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); in __gfs2_fallocate()
Dquota.c744 pg_off = loc % PAGE_CACHE_SIZE; in gfs2_write_disk_quota()
747 if ((pg_off + nbytes) > PAGE_CACHE_SIZE) { in gfs2_write_disk_quota()
749 overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE; in gfs2_write_disk_quota()
Dbmap.c78 memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); in gfs2_unstuffer_page()
936 unsigned offset = from & (PAGE_CACHE_SIZE-1); in gfs2_block_truncate_page()
/linux-4.4.14/fs/jfs/
Djfs_metapage.c83 #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
319 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { in last_write_complete()
374 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { in metapage_writepage()
419 xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits; in metapage_writepage()
488 int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; in metapage_readpage()
545 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { in metapage_releasepage()
571 BUG_ON(offset || length < PAGE_CACHE_SIZE); in metapage_invalidatepage()
605 if ((page_offset + size) > PAGE_CACHE_SIZE) { in __get_metapage()
624 if (new && (PSIZE == PAGE_CACHE_SIZE)) { in __get_metapage()
791 for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { in __invalidate_metapages()
/linux-4.4.14/fs/ntfs/
Dcompress.c116 memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); in zero_partial_compressed_page()
563 max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - in ntfs_read_compressed_block()
569 zero_user(page, 0, PAGE_CACHE_SIZE); in ntfs_read_compressed_block()
765 PAGE_CACHE_SIZE - in ntfs_read_compressed_block()
777 cb_pos += PAGE_CACHE_SIZE - cur_ofs; in ntfs_read_compressed_block()
819 PAGE_CACHE_SIZE - cur_ofs); in ntfs_read_compressed_block()
820 cb_pos += PAGE_CACHE_SIZE - cur_ofs; in ntfs_read_compressed_block()
856 cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2; in ntfs_read_compressed_block()
Daops.c145 recs = PAGE_CACHE_SIZE / rec_size; in ntfs_end_buffer_async_read()
415 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> in ntfs_readpage()
417 zero_user(page, 0, PAGE_CACHE_SIZE); in ntfs_readpage()
466 zero_user(page, 0, PAGE_CACHE_SIZE); in ntfs_readpage()
512 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); in ntfs_readpage()
928 ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; in ntfs_write_mst_block()
952 max_bhs = PAGE_CACHE_SIZE / bh_size; in ntfs_write_mst_block()
964 BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); in ntfs_write_mst_block()
1303 if (ni->itype.index.block_size == PAGE_CACHE_SIZE) in ntfs_write_mst_block()
1368 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> in ntfs_writepage()
[all …]
Dbitmap.c111 len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos); in __ntfs_bitmap_set_bits_in_run()
135 len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE); in __ntfs_bitmap_set_bits_in_run()
Ddir.c336 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { in ntfs_lookup_inode_by_name()
369 if (index_end > kaddr + PAGE_CACHE_SIZE) { in ntfs_lookup_inode_by_name()
814 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
847 if (index_end > kaddr + PAGE_CACHE_SIZE) {
1249 cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1); in ntfs_readdir()
1250 bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1); in ntfs_readdir()
1255 (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1)); in ntfs_readdir()
1273 if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) { in ntfs_readdir()
1275 bmp_pos += PAGE_CACHE_SIZE * 8; in ntfs_readdir()
1313 if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { in ntfs_readdir()
[all …]
Dlogfile.c384 size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK); in ntfs_check_and_load_restart_page()
409 size = min_t(int, to_read, PAGE_CACHE_SIZE); in ntfs_check_and_load_restart_page()
512 if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <= in ntfs_check_logfile()
516 log_page_size = PAGE_CACHE_SIZE; in ntfs_check_logfile()
Dsuper.c829 if (vol->mft_record_size > PAGE_CACHE_SIZE) { in parse_ntfs_boot_sector()
833 vol->mft_record_size, PAGE_CACHE_SIZE); in parse_ntfs_boot_sector()
1099 mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size; in check_mft_mirror()
1619 size = PAGE_CACHE_SIZE; in load_and_init_attrdef()
1630 if (size == PAGE_CACHE_SIZE) { in load_and_init_attrdef()
1688 size = PAGE_CACHE_SIZE; in load_and_init_upcase()
1699 if (size == PAGE_CACHE_SIZE) { in load_and_init_upcase()
2477 max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> in get_nr_free_clusters()
2481 max_index, PAGE_CACHE_SIZE / 4); in get_nr_free_clusters()
2494 nr_free -= PAGE_CACHE_SIZE * 8; in get_nr_free_clusters()
[all …]
Dfile.c224 end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in ntfs_attr_extend_initialized()
945 if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) { in ntfs_prepare_pages_for_non_resident_write()
1642 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); in ntfs_commit_pages_after_write()
1709 len = PAGE_CACHE_SIZE - ofs; in ntfs_copy_from_user_iter()
1727 len = PAGE_CACHE_SIZE - copied; in ntfs_copy_from_user_iter()
1734 len = PAGE_CACHE_SIZE; in ntfs_copy_from_user_iter()
1790 if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) in ntfs_perform_write()
1801 bytes = PAGE_CACHE_SIZE - ofs; in ntfs_perform_write()
Dntfs.h46 NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE,
Dindex.c293 if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { in ntfs_index_lookup()
326 if (index_end > kaddr + PAGE_CACHE_SIZE) { in ntfs_index_lookup()
Dinode.c871 if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { in ntfs_read_locked_inode()
876 PAGE_CACHE_SIZE); in ntfs_read_locked_inode()
1588 if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { in ntfs_read_locked_index_inode()
1591 ni->itype.index.block_size, PAGE_CACHE_SIZE); in ntfs_read_locked_index_inode()
Dattrib.c1663 memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size); in ntfs_attr_make_non_resident()
2539 size = PAGE_CACHE_SIZE; in ntfs_attr_set()
2564 memset(kaddr, val, PAGE_CACHE_SIZE); in ntfs_attr_set()
Dlcnalloc.c294 buf_size = PAGE_CACHE_SIZE - buf_size; in ntfs_cluster_alloc()
/linux-4.4.14/fs/9p/
Dvfs_addr.c156 if (offset == 0 && length == PAGE_CACHE_SIZE) in v9fs_invalidate_page()
172 len = PAGE_CACHE_SIZE; in v9fs_vfs_writepage_locked()
291 if (len == PAGE_CACHE_SIZE) in v9fs_write_begin()
316 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in v9fs_write_end()
Dvfs_super.c90 sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; in v9fs_fill_super()
/linux-4.4.14/fs/sysv/
Ddir.c88 limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE; in sysv_readdir()
149 kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; in sysv_find_entry()
193 kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; in sysv_add_link()
264 memset(base, 0, PAGE_CACHE_SIZE); in sysv_make_empty()
299 kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE; in sysv_empty_dir()
/linux-4.4.14/fs/f2fs/
Ddata.c156 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { in f2fs_submit_page_bio()
195 if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) < in f2fs_submit_page_mbio()
196 PAGE_CACHE_SIZE) { in f2fs_submit_page_mbio()
328 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in get_read_data_page()
439 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in get_new_data_page()
945 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in f2fs_mpage_readpages()
995 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in f2fs_mpage_readpages()
1132 offset = i_size & (PAGE_CACHE_SIZE - 1); in f2fs_write_data_page()
1136 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in f2fs_write_data_page()
1463 if (len == PAGE_CACHE_SIZE) in f2fs_write_begin()
[all …]
Dinline.c57 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); in read_inline_data()
99 zero_user_segment(page, 0, PAGE_CACHE_SIZE); in f2fs_read_inline_data()
135 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); in f2fs_convert_inline_page()
390 zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); in f2fs_convert_inline_dir()
420 if (i_size_read(dir) < PAGE_CACHE_SIZE) { in f2fs_convert_inline_dir()
421 i_size_write(dir, PAGE_CACHE_SIZE); in f2fs_convert_inline_dir()
Dcrypto.c355 sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); in f2fs_page_crypto()
357 sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); in f2fs_page_crypto()
358 ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, in f2fs_page_crypto()
Dfile.c81 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in f2fs_vm_page_mkwrite()
504 unsigned offset = from & (PAGE_CACHE_SIZE - 1); in truncate_partial_data_page()
525 zero_user(page, offset, PAGE_CACHE_SIZE - offset); in truncate_partial_data_page()
792 off_start = offset & (PAGE_CACHE_SIZE - 1); in punch_hole()
793 off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); in punch_hole()
803 PAGE_CACHE_SIZE - off_start); in punch_hole()
1011 off_start = offset & (PAGE_CACHE_SIZE - 1); in f2fs_zero_range()
1012 off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); in f2fs_zero_range()
1026 PAGE_CACHE_SIZE - off_start); in f2fs_zero_range()
1172 off_start = offset & (PAGE_CACHE_SIZE - 1); in expand_inode_data()
[all …]
Dsegment.c820 sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE - in npages_for_summary_flush()
825 (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) in npages_for_summary_flush()
844 memcpy(dst, src, PAGE_CACHE_SIZE); in update_meta_page()
846 memset(dst, 0, PAGE_CACHE_SIZE); in update_meta_page()
1542 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - in read_compacted_summaries()
1614 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE); in read_normal_summaries()
1697 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - in write_compacted_summaries()
1788 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); in get_next_sit_page()
2111 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); in build_curseg()
Ddebug.c164 si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE; in update_mem_info()
Dsuper.c934 if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) { in sanity_check_raw_super()
937 PAGE_CACHE_SIZE); in sanity_check_raw_super()
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/
Dmodule.c84 while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) && in kportal_memhog_free()
90 while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) && in kportal_memhog_free()
140 memset(level1p, 0, PAGE_CACHE_SIZE); in kportal_memhog_alloc()
143 count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) { in kportal_memhog_alloc()
155 memset(level2p, 0, PAGE_CACHE_SIZE); in kportal_memhog_alloc()
158 count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) { in kportal_memhog_alloc()
Dtracefile.c146 if (tage->used + len <= PAGE_CACHE_SIZE) in cfs_trace_get_tage_try()
225 if (len > PAGE_CACHE_SIZE) { in cfs_trace_get_tage()
313 if (needed + known_size > PAGE_CACHE_SIZE) in libcfs_debug_vmsg2()
324 max_nob = PAGE_CACHE_SIZE - tage->used - known_size; in libcfs_debug_vmsg2()
388 __LASSERT (tage->used <= PAGE_CACHE_SIZE); in libcfs_debug_vmsg2()
810 if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ in cfs_trace_allocate_string_buffer()
Dtracefile.h330 __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
/linux-4.4.14/fs/ext4/
Dcrypto.c288 sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); in ext4_page_crypto()
290 sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); in ext4_page_crypto()
291 ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, in ext4_page_crypto()
403 BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); in ext4_encrypted_zeroout()
Dreadpage.c143 const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; in ext4_mpage_readpages()
220 PAGE_CACHE_SIZE); in ext4_mpage_readpages()
253 PAGE_CACHE_SIZE); in ext4_mpage_readpages()
Dinode.c917 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in ext4_block_write_begin()
929 BUG_ON(from > PAGE_CACHE_SIZE); in ext4_block_write_begin()
930 BUG_ON(to > PAGE_CACHE_SIZE); in ext4_block_write_begin()
1020 from = pos & (PAGE_CACHE_SIZE - 1); in ext4_write_begin()
1258 from = pos & (PAGE_CACHE_SIZE - 1); in ext4_journalled_write_end()
1396 BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); in ext4_da_page_release_reservation()
1495 block_invalidatepage(page, 0, PAGE_CACHE_SIZE); in mpage_release_unused_pages()
1869 len = PAGE_CACHE_SIZE; in ext4_writepage()
1893 (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) { in ext4_writepage()
1938 len = PAGE_CACHE_SIZE; in mpage_submit_page()
[all …]
Dpage-io.c450 if (len < PAGE_CACHE_SIZE) in ext4_bio_write_page()
451 zero_user_segment(page, len, PAGE_CACHE_SIZE); in ext4_bio_write_page()
Dmove_extent.c271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; in move_extent_per_page()
564 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; in ext4_move_extents()
/linux-4.4.14/drivers/staging/lustre/lustre/include/
Dlustre_mdc.h156 min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE); in mdc_update_max_ea_from_body()
161 min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE); in mdc_update_max_ea_from_body()
Dlustre_disk.h131 #if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8)
134 #define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8)
Dobd_support.h499 memset(kmap(page), val, PAGE_CACHE_SIZE); \
Dlu_object.h1119 CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
/linux-4.4.14/fs/ext2/
Ddir.c40 #if (PAGE_CACHE_SIZE >= 65536) in ext2_rec_len_from_disk()
49 #if (PAGE_CACHE_SIZE >= 65536) in ext2_rec_len_to_disk()
83 if (last_byte > PAGE_CACHE_SIZE) in ext2_last_byte()
84 last_byte = PAGE_CACHE_SIZE; in ext2_last_byte()
121 unsigned limit = PAGE_CACHE_SIZE; in ext2_check_page()
312 ctx->pos += PAGE_CACHE_SIZE - offset; in ext2_readdir()
514 kaddr += PAGE_CACHE_SIZE - reclen; in ext2_add_link()
/linux-4.4.14/fs/udf/
Dfile.c49 memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); in __udf_adinicb_readpage()
90 if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE)) in udf_adinicb_write_begin()
97 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) in udf_adinicb_write_begin()
/linux-4.4.14/fs/ubifs/
Dfile.c124 memset(addr, 0, PAGE_CACHE_SIZE); in do_readpage()
257 if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) in write_begin_slow()
449 if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) { in ubifs_write_begin()
552 if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) { in ubifs_write_end()
627 memset(addr, 0, PAGE_CACHE_SIZE); in populate_page()
676 int len = i_size & (PAGE_CACHE_SIZE - 1); in populate_page()
1005 int err, len = i_size & (PAGE_CACHE_SIZE - 1); in ubifs_writepage()
1037 return do_writepage(page, PAGE_CACHE_SIZE); in ubifs_writepage()
1048 memset(kaddr + len, 0, PAGE_CACHE_SIZE - len); in ubifs_writepage()
1160 (PAGE_CACHE_SIZE - 1); in do_truncation()
[all …]
/linux-4.4.14/fs/cifs/
Dfile.c1852 if ((to > PAGE_CACHE_SIZE) || (from > to)) { in cifs_partialpagewrite()
2012 wdata->pagesz = PAGE_CACHE_SIZE; in wdata_send_pages()
2015 (loff_t)PAGE_CACHE_SIZE); in wdata_send_pages()
2016 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; in wdata_send_pages()
2050 if (cifs_sb->wsize < PAGE_CACHE_SIZE) in cifs_writepages()
2074 tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1; in cifs_writepages()
2173 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE); in cifs_writepage_locked()
2217 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE) in cifs_write_end()
2222 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); in cifs_write_end()
3289 got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes); in cifs_readv_complete()
[all …]
Dinode.c62 PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
2003 unsigned offset = from & (PAGE_CACHE_SIZE - 1);
2011 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
/linux-4.4.14/block/
Dblk-settings.c242 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { in blk_queue_max_hw_sectors()
332 if (max_size < PAGE_CACHE_SIZE) { in blk_queue_max_segment_size()
333 max_size = PAGE_CACHE_SIZE; in blk_queue_max_segment_size()
763 if (mask < PAGE_CACHE_SIZE - 1) { in blk_queue_segment_boundary()
764 mask = PAGE_CACHE_SIZE - 1; in blk_queue_segment_boundary()
Dioctl.c523 return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); in blkdev_ioctl()
551 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; in blkdev_ioctl()
Dcompat_ioctl.c713 (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); in compat_blkdev_ioctl()
732 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; in compat_blkdev_ioctl()
Dblk-sysfs.c120 return queue_var_show(PAGE_CACHE_SIZE, (page)); in queue_max_segment_size_show()
/linux-4.4.14/fs/
Dmpage.c147 const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; in do_mpage_readpage()
251 zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE); in do_mpage_readpage()
474 const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; in __mpage_writepage()
586 unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); in __mpage_writepage()
590 zero_user_segment(page, offset, PAGE_CACHE_SIZE); in __mpage_writepage()
Dbuffer.c1547 BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); in block_invalidatepage()
1908 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in __block_write_begin()
1918 BUG_ON(from > PAGE_CACHE_SIZE); in __block_write_begin()
1919 BUG_ON(to > PAGE_CACHE_SIZE); in __block_write_begin()
2061 start = pos & (PAGE_CACHE_SIZE - 1); in block_write_end()
2150 to = min_t(unsigned, PAGE_CACHE_SIZE - from, count); in block_is_partially_uptodate()
2152 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) in block_is_partially_uptodate()
2318 len = PAGE_CACHE_SIZE - zerofrom; in cont_expand_zero()
2447 end = PAGE_CACHE_SIZE; in block_page_mkwrite()
2523 from = pos & (PAGE_CACHE_SIZE - 1); in nobh_write_begin()
[all …]
Dlibfs.c36 buf->f_bsize = PAGE_CACHE_SIZE; in simple_statfs()
406 if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { in simple_write_begin()
407 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in simple_write_begin()
409 zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE); in simple_write_begin()
445 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in simple_write_end()
480 s->s_blocksize = PAGE_CACHE_SIZE; in simple_fill_super()
Ddax.c451 PAGE_CACHE_SIZE, 0); in __dax_fault()
741 unsigned offset = from & (PAGE_CACHE_SIZE-1); in dax_zero_page_range()
747 BUG_ON((offset + length) > PAGE_CACHE_SIZE); in dax_zero_page_range()
750 bh.b_size = PAGE_CACHE_SIZE; in dax_zero_page_range()
Dsplice.c333 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in __generic_file_splice_read()
400 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); in __generic_file_splice_read()
642 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in default_file_splice_read()
652 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); in default_file_splice_read()
/linux-4.4.14/drivers/staging/lustre/lustre/include/linux/
Dlustre_patchless_compat.h55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in truncate_complete_page()
/linux-4.4.14/fs/ufs/
Ddir.c114 unsigned limit = PAGE_CACHE_SIZE; in ufs_check_page()
215 if (last_byte > PAGE_CACHE_SIZE) in ufs_last_byte()
216 last_byte = PAGE_CACHE_SIZE; in ufs_last_byte()
344 kaddr += PAGE_CACHE_SIZE - reclen; in ufs_add_link()
457 ctx->pos += PAGE_CACHE_SIZE - offset; in ufs_readdir()
577 memset(base, 0, PAGE_CACHE_SIZE); in ufs_make_empty()
/linux-4.4.14/drivers/mmc/core/
Dhost.c361 host->max_seg_size = PAGE_CACHE_SIZE; in mmc_alloc_host()
363 host->max_req_size = PAGE_CACHE_SIZE; in mmc_alloc_host()
365 host->max_blk_count = PAGE_CACHE_SIZE / 512; in mmc_alloc_host()
/linux-4.4.14/fs/freevxfs/
Dvxfs_immed.c73 memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE); in vxfs_immed_readpage()
Dvxfs_lookup.c48 #define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_CACHE_SIZE / (sbp)->s_blocksize))
/linux-4.4.14/drivers/staging/lustre/include/linux/libcfs/linux/
Dlinux-mem.h60 #define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
/linux-4.4.14/fs/nilfs2/
Ddir.c73 if (last_byte > PAGE_CACHE_SIZE) in nilfs_last_byte()
74 last_byte = PAGE_CACHE_SIZE; in nilfs_last_byte()
112 unsigned limit = PAGE_CACHE_SIZE; in nilfs_check_page()
275 ctx->pos += PAGE_CACHE_SIZE - offset; in nilfs_readdir()
463 kaddr += PAGE_CACHE_SIZE - reclen; in nilfs_add_link()
/linux-4.4.14/fs/reiserfs/
Dxattr.c529 size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1)); in reiserfs_xattr_set_handle()
531 if (buffer_size - buffer_pos > PAGE_CACHE_SIZE) in reiserfs_xattr_set_handle()
532 chunk = PAGE_CACHE_SIZE; in reiserfs_xattr_set_handle()
549 if (chunk + skip > PAGE_CACHE_SIZE) in reiserfs_xattr_set_handle()
550 chunk = PAGE_CACHE_SIZE - skip; in reiserfs_xattr_set_handle()
678 if (isize - file_pos > PAGE_CACHE_SIZE) in reiserfs_xattr_get()
679 chunk = PAGE_CACHE_SIZE; in reiserfs_xattr_get()
Dtail_conversion.c154 (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); in direct2indirect()
274 tail = tail + (pos & (PAGE_CACHE_SIZE - 1)); in indirect2direct()
Dinode.c389 offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1); in _get_block_create_0()
590 tail_start = tail_offset & (PAGE_CACHE_SIZE - 1); in convert_tail_for_hole()
2195 unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1); in grab_tail_page()
2267 unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1); in reiserfs_truncate_file()
2428 p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1); in map_block_for_writepage()
2537 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; in reiserfs_write_full_page()
2566 last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1); in reiserfs_write_full_page()
2572 zero_user_segment(page, last_offset, PAGE_CACHE_SIZE); in reiserfs_write_full_page()
2911 start = pos & (PAGE_CACHE_SIZE - 1); in reiserfs_write_end()
3183 int partial_page = (offset || length < PAGE_CACHE_SIZE); in reiserfs_invalidatepage()
Dfile.c187 int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; in reiserfs_commit_page()
/linux-4.4.14/net/sunrpc/
Dxdr.c193 pgto_base = PAGE_CACHE_SIZE; in _shift_data_right_pages()
197 pgfrom_base = PAGE_CACHE_SIZE; in _shift_data_right_pages()
243 copy = PAGE_CACHE_SIZE - pgbase; in _copy_to_pages()
256 if (pgbase == PAGE_CACHE_SIZE) { in _copy_to_pages()
287 copy = PAGE_CACHE_SIZE - pgbase; in _copy_from_pages()
296 if (pgbase == PAGE_CACHE_SIZE) { in _copy_from_pages()
1302 avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base, in xdr_xcode_array2()
1386 (unsigned int) PAGE_CACHE_SIZE); in xdr_xcode_array2()
1482 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); in xdr_process_buf()
1484 thislen = PAGE_CACHE_SIZE - page_offset; in xdr_process_buf()
[all …]
Dsocklib.c116 len = PAGE_CACHE_SIZE; in xdr_partial_copy_from_skb()
/linux-4.4.14/fs/ocfs2/
Daops.c237 if (size > PAGE_CACHE_SIZE || in ocfs2_read_inline_data()
250 memset(kaddr + size, 0, PAGE_CACHE_SIZE - size); in ocfs2_read_inline_data()
1018 unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE; in ocfs2_figure_cluster_boundaries()
1191 #if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
1194 #define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
1197 #define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)
1395 unsigned from = user_pos & (PAGE_CACHE_SIZE - 1), in ocfs2_write_failure()
1434 map_from = user_pos & (PAGE_CACHE_SIZE - 1); in ocfs2_prepare_page_for_write()
1734 wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1); in ocfs2_set_target_boundaries()
1771 wc->w_target_to = PAGE_CACHE_SIZE; in ocfs2_set_target_boundaries()
[all …]
Dmmap.c68 unsigned int len = PAGE_CACHE_SIZE; in __ocfs2_page_mkwrite()
Dfile.c797 zero_from = abs_from & (PAGE_CACHE_SIZE - 1); in ocfs2_write_zero_page()
798 zero_to = abs_to & (PAGE_CACHE_SIZE - 1); in ocfs2_write_zero_page()
800 zero_to = PAGE_CACHE_SIZE; in ocfs2_write_zero_page()
962 next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE; in ocfs2_zero_extend_range()
Dalloc.c6652 unsigned int from, to = PAGE_CACHE_SIZE; in ocfs2_zero_cluster_pages()
6660 to = PAGE_CACHE_SIZE; in ocfs2_zero_cluster_pages()
6664 from = start & (PAGE_CACHE_SIZE - 1); in ocfs2_zero_cluster_pages()
6666 to = end & (PAGE_CACHE_SIZE - 1); in ocfs2_zero_cluster_pages()
6668 BUG_ON(from > PAGE_CACHE_SIZE); in ocfs2_zero_cluster_pages()
6669 BUG_ON(to > PAGE_CACHE_SIZE); in ocfs2_zero_cluster_pages()
6931 PAGE_CACHE_SIZE < osb->s_clustersize) in ocfs2_convert_inline_data_to_extents()
6932 end = PAGE_CACHE_SIZE; in ocfs2_convert_inline_data_to_extents()
6952 page_end = PAGE_CACHE_SIZE; in ocfs2_convert_inline_data_to_extents()
6953 if (PAGE_CACHE_SIZE > osb->s_clustersize) in ocfs2_convert_inline_data_to_extents()
/linux-4.4.14/fs/minix/
Ddir.c41 unsigned last_byte = PAGE_CACHE_SIZE; in minix_last_byte()
44 last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1); in minix_last_byte()
232 limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize; in minix_add_link()
330 memset(kaddr, 0, PAGE_CACHE_SIZE); in minix_make_empty()
/linux-4.4.14/fs/jffs2/
Dfile.c97 ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); in jffs2_do_readpage_nolock()
248 unsigned start = pos & (PAGE_CACHE_SIZE - 1); in jffs2_write_end()
264 if (end == PAGE_CACHE_SIZE) { in jffs2_write_end()
Dwrite.c175 if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || in jffs2_write_dnode()
176 ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && in jffs2_write_dnode()
369 datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1))); in jffs2_write_inode_range()
Ddebug.c98 if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) in __jffs2_dbg_fragtree_paranoia_check_nolock()
99 && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { in __jffs2_dbg_fragtree_paranoia_check_nolock()
105 if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) in __jffs2_dbg_fragtree_paranoia_check_nolock()
106 && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { in __jffs2_dbg_fragtree_paranoia_check_nolock()
Dnodelist.c93 if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { in jffs2_truncate_fragtree()
385 if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { in jffs2_add_full_dnode_to_inode()
394 if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { in jffs2_add_full_dnode_to_inode()
Dgc.c1175 min = start & ~(PAGE_CACHE_SIZE-1); in jffs2_garbage_collect_dnode()
1176 max = min + PAGE_CACHE_SIZE; in jffs2_garbage_collect_dnode()
1334 writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1)); in jffs2_garbage_collect_dnode()
/linux-4.4.14/fs/btrfs/tests/
Dextent-io-tests.c248 if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) { in test_find_delalloc()
250 test_start, test_start + PAGE_CACHE_SIZE - 1, start, in test_find_delalloc()
Dfree-space-tests.c25 #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
/linux-4.4.14/fs/nfs/blocklayout/
Dblocklayout.c266 if (pg_offset + bytes_left > PAGE_CACHE_SIZE) in bl_read_pagelist()
267 pg_len = PAGE_CACHE_SIZE - pg_offset; in bl_read_pagelist()
272 pg_len = PAGE_CACHE_SIZE; in bl_read_pagelist()
344 PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK; in bl_write_cleanup()
411 pg_len = PAGE_CACHE_SIZE; in bl_write_pagelist()
809 end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); in pnfs_num_cont_bytes()
Dblocklayout.h43 #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
/linux-4.4.14/fs/exofs/
Ddir.c52 if (last_byte > PAGE_CACHE_SIZE) in exofs_last_byte()
53 last_byte = PAGE_CACHE_SIZE; in exofs_last_byte()
88 unsigned limit = PAGE_CACHE_SIZE; in exofs_check_page()
257 ctx->pos += PAGE_CACHE_SIZE - offset; in exofs_readdir()
452 kaddr += PAGE_CACHE_SIZE - reclen; in exofs_add_link()
Dinode.c400 len = PAGE_CACHE_SIZE; in readpage_strip()
445 if (len != PAGE_CACHE_SIZE) in readpage_strip()
446 zero_user(page, len, PAGE_CACHE_SIZE - len); in readpage_strip()
711 len = PAGE_CACHE_SIZE; in writepage_strip()
884 if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { in exofs_write_begin()
890 rlen = PAGE_CACHE_SIZE; in exofs_write_begin()
/linux-4.4.14/fs/hostfs/
Dhostfs_kern.c413 int count = PAGE_CACHE_SIZE; in hostfs_writepage()
418 count = inode->i_size & (PAGE_CACHE_SIZE-1); in hostfs_writepage()
450 PAGE_CACHE_SIZE); in hostfs_readpage()
458 memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); in hostfs_readpage()
488 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in hostfs_write_end()
495 if (!PageUptodate(page) && err == PAGE_CACHE_SIZE) in hostfs_write_end()
/linux-4.4.14/fs/xfs/
Dxfs_mount.h224 return PAGE_CACHE_SIZE; in xfs_preferred_iosize()
229 PAGE_CACHE_SIZE)); in xfs_preferred_iosize()
Dxfs_aops.c752 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), in xfs_convert_page()
753 PAGE_CACHE_SIZE); in xfs_convert_page()
754 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; in xfs_convert_page()
930 xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE); in xfs_aops_discard_page()
1015 unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1); in xfs_vm_writepage()
1046 zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE); in xfs_vm_writepage()
1752 loff_t from = pos & (PAGE_CACHE_SIZE - 1); in xfs_vm_write_failed()
1828 ASSERT(len <= PAGE_CACHE_SIZE); in xfs_vm_write_begin()
1881 ASSERT(len <= PAGE_CACHE_SIZE); in xfs_vm_write_end()
Dxfs_file.c109 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ in xfs_iozero()
110 bytes = PAGE_CACHE_SIZE - offset; in xfs_iozero()
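The two xfs_file.c hits above show the standard split of a file position into an in-page offset and the number of bytes left in that page; a small standalone sketch of the same arithmetic, assuming 4 KiB pages (names are illustrative):

#define PAGE_CACHE_SIZE 4096UL

/* Split an absolute position into (offset within page, bytes to the end
 * of that page, capped at the remaining request), as in xfs_iozero(). */
static void split_pos(unsigned long long pos, unsigned long count,
		      unsigned long *offset, unsigned long *bytes)
{
	*offset = pos & (PAGE_CACHE_SIZE - 1);   /* within page */
	*bytes = PAGE_CACHE_SIZE - *offset;      /* up to end of page */
	if (*bytes > count)
		*bytes = count;
}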
/linux-4.4.14/drivers/staging/lustre/lustre/mgc/
Dmgc_request.c1115 inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); in mgc_apply_recover_logs()
1119 pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); in mgc_apply_recover_logs()
1120 if (pos >= PAGE_CACHE_SIZE) { in mgc_apply_recover_logs()
1127 bufsz = PAGE_CACHE_SIZE - pos; in mgc_apply_recover_logs()
1159 if (entry->mne_length > PAGE_CACHE_SIZE) { in mgc_apply_recover_logs()
1362 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); in mgc_process_recover_log()
1416 min_t(int, ealen, PAGE_CACHE_SIZE), in mgc_process_recover_log()
1425 ealen -= PAGE_CACHE_SIZE; in mgc_process_recover_log()
/linux-4.4.14/drivers/staging/lustre/lustre/osc/
Dosc_cache.c864 } else if (blocksize < PAGE_CACHE_SIZE && in osc_extent_finish()
865 last_count != PAGE_CACHE_SIZE) { in osc_extent_finish()
876 lost_grant = PAGE_CACHE_SIZE - count; in osc_extent_finish()
1112 LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE); in osc_extent_make_ready()
1120 oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; in osc_extent_make_ready()
1279 return kms % PAGE_CACHE_SIZE; in osc_refresh_count()
1281 return PAGE_CACHE_SIZE; in osc_refresh_count()
1362 cli->cl_dirty += PAGE_CACHE_SIZE; in osc_consume_write_grant()
1365 PAGE_CACHE_SIZE, pga, pga->pg); in osc_consume_write_grant()
1381 cli->cl_dirty -= PAGE_CACHE_SIZE; in osc_release_write_grant()
[all …]
Dosc_request.c1306 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && in osc_brw_prep_request()
1308 poff == 0 && pg->count == PAGE_CACHE_SIZE) && in osc_brw_prep_request()
1859 PAGE_CACHE_SIZE); in osc_build_rpc()
2750 fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1) in osc_get_info()
2755 PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK; in osc_get_info()
/linux-4.4.14/include/linux/
Dpagemap.h98 #define PAGE_CACHE_SIZE PAGE_SIZE macro
100 #define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
683 return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >> in dir_pages()
Df2fs_fs.h264 #define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
284 #define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
Dnilfs2_fs.h334 #if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) in nilfs_rec_len_from_disk()
343 #if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) in nilfs_rec_len_to_disk()
Dbuffer_head.h46 #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
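pagemap.h above is where PAGE_CACHE_SIZE is defined as a plain alias for PAGE_SIZE, and the other include/linux hits (PAGE_CACHE_ALIGN, dir_pages, NAT_ENTRY_PER_BLOCK, MAX_BUF_PER_PAGE) are page-rounding or entries-per-page arithmetic built on it. A hedged standalone sketch with 4 KiB pages (the constants below are assumed, not quoted):

#define PAGE_SIZE        4096UL
#define PAGE_CACHE_SIZE  PAGE_SIZE                    /* alias, as in pagemap.h */
#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))
#define PAGE_CACHE_ALIGN(addr) (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

/* Pages needed to hold i_size bytes, mirroring dir_pages() above. */
static unsigned long pages_for_size(unsigned long long i_size)
{
	return (unsigned long)((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT);
}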
/linux-4.4.14/fs/qnx6/
Ddir.c39 if (last_byte > PAGE_CACHE_SIZE) in last_entry()
40 last_byte = PAGE_CACHE_SIZE; in last_entry()
/linux-4.4.14/fs/afs/
Dwrite.c96 if (pos + PAGE_CACHE_SIZE > i_size) in afs_fill_page()
99 len = PAGE_CACHE_SIZE; in afs_fill_page()
126 unsigned from = pos & (PAGE_CACHE_SIZE - 1); in afs_write_begin()
154 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) { in afs_write_begin()
Dfile.c322 if (offset == 0 && length == PAGE_CACHE_SIZE) { in afs_invalidatepage()
Dsuper.c318 sb->s_blocksize = PAGE_CACHE_SIZE; in afs_fill_super()
/linux-4.4.14/drivers/staging/lustre/lustre/obdecho/
Decho_client.c1209 offset + npages * PAGE_CACHE_SIZE - 1, in cl_echo_object_brw()
1355 lsm->lsm_stripe_size = PAGE_CACHE_SIZE; in echo_create_object()
1492 LASSERT(count == PAGE_CACHE_SIZE); in echo_client_page_debug_setup()
1496 for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { in echo_client_page_debug_setup()
1524 LASSERT(count == PAGE_CACHE_SIZE); in echo_client_page_debug_check()
1528 for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { in echo_client_page_debug_check()
1595 i++, pgp++, off += PAGE_CACHE_SIZE) { in echo_client_kbrw()
1605 pgp->count = PAGE_CACHE_SIZE; in echo_client_kbrw()
1686 for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) { in echo_client_prep_commit()
1688 rnb[i].len = PAGE_CACHE_SIZE; in echo_client_prep_commit()
[all …]
/linux-4.4.14/fs/configfs/
Dmount.c74 sb->s_blocksize = PAGE_CACHE_SIZE; in configfs_fill_super()
/linux-4.4.14/drivers/mmc/host/
Dtmio_mmc_dma.c66 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || in tmio_mmc_start_dma_rx()
142 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || in tmio_mmc_start_dma_tx()
/linux-4.4.14/fs/fuse/
Dfile.c687 size_t off = num_read & (PAGE_CACHE_SIZE - 1); in fuse_short_read()
690 zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE); in fuse_short_read()
707 size_t count = PAGE_CACHE_SIZE; in fuse_do_readpage()
839 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || in fuse_readpages_fill()
1006 if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) in fuse_send_write_pages()
1009 if (count > PAGE_CACHE_SIZE - offset) in fuse_send_write_pages()
1010 count -= PAGE_CACHE_SIZE - offset; in fuse_send_write_pages()
1027 unsigned offset = pos & (PAGE_CACHE_SIZE - 1); in fuse_fill_write_pages()
1038 size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, in fuse_fill_write_pages()
1075 if (offset == PAGE_CACHE_SIZE) in fuse_fill_write_pages()
[all …]
Dinode.c867 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; in process_init_reply()
904 ra_pages = fc->max_read / PAGE_CACHE_SIZE; in process_init_reply()
925 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; in fuse_send_init()
958 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; in fuse_bdi_init()
1056 sb->s_blocksize = PAGE_CACHE_SIZE; in fuse_fill_super()
/linux-4.4.14/include/linux/ceph/
Dlibceph.h175 return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - in calc_pages_for()
/linux-4.4.14/fs/affs/
Dfile.c513 BUG_ON(to > PAGE_CACHE_SIZE); in affs_do_readpage_ofs()
617 to = PAGE_CACHE_SIZE; in affs_readpage_ofs()
620 memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to); in affs_readpage_ofs()
660 err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE); in affs_write_begin_ofs()
681 from = pos & (PAGE_CACHE_SIZE - 1); in affs_write_end_ofs()
/linux-4.4.14/fs/hfs/
Dbtree.c119 tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in hfs_btree_open()
282 if (++off >= PAGE_CACHE_SIZE) { in hfs_bmap_alloc()
Dbnode.c432 min((int)PAGE_CACHE_SIZE, (int)tree->node_size)); in hfs_bnode_create()
436 memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); in hfs_bnode_create()
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/
Dclass_obd.c464 if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) { in obd_init_checks()
466 (__u64)PAGE_CACHE_SIZE); in obd_init_checks()
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/
Dlib-socket.c160 if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { in lnet_ipif_enumerate()
162 nalloc = PAGE_CACHE_SIZE/sizeof(*ifr); in lnet_ipif_enumerate()
Dlib-md.c139 lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE) in lnet_md_build()
Drouter.c1257 rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; in lnet_new_rtrbuf()
1427 large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in lnet_rtrpools_alloc()
Dlib-move.c546 <= PAGE_CACHE_SIZE); in lnet_extract_kiov()
551 LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); in lnet_extract_kiov()
880 while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { in lnet_msg2bufpool()
/linux-4.4.14/fs/kernfs/
Dmount.c72 sb->s_blocksize = PAGE_CACHE_SIZE; in kernfs_fill_super()
/linux-4.4.14/fs/ramfs/
Dinode.c225 sb->s_blocksize = PAGE_CACHE_SIZE; in ramfs_fill_super()
/linux-4.4.14/fs/ncpfs/
Dncplib_kernel.h194 #define NCP_DIRCACHE_SIZE ((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *)))
/linux-4.4.14/fs/efivarfs/
Dsuper.c200 sb->s_blocksize = PAGE_CACHE_SIZE; in efivarfs_fill_super()
/linux-4.4.14/Documentation/filesystems/
Dcramfs.txt41 same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
Dtmpfs.txt63 nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
/linux-4.4.14/drivers/oprofile/
Doprofilefs.c242 sb->s_blocksize = PAGE_CACHE_SIZE; in oprofilefs_fill_super()
/linux-4.4.14/fs/dlm/
Dlowcomms.c598 cbuf_init(&con->cb, PAGE_CACHE_SIZE); in receive_from_sock()
615 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb); in receive_from_sock()
633 PAGE_CACHE_SIZE); in receive_from_sock()
1369 (PAGE_CACHE_SIZE - e->end < len)) { in dlm_lowcomms_get_buffer()
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
Drecover.c197 LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, in ptlrpc_resend()
Dsec_bulk.c61 #define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
Dlproc_ptlrpc.c311 bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in ptlrpc_lprocfs_req_history_max_seq_write()
1231 if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len) in lprocfs_wr_import()
/linux-4.4.14/fs/ocfs2/cluster/
Dheartbeat.c421 vec_start = (cs << bits) % PAGE_CACHE_SIZE; in o2hb_setup_one_bio()
426 vec_len = min(PAGE_CACHE_SIZE - vec_start, in o2hb_setup_one_bio()
427 (max_slots-cs) * (PAGE_CACHE_SIZE/spp) ); in o2hb_setup_one_bio()
435 cs += vec_len / (PAGE_CACHE_SIZE/spp); in o2hb_setup_one_bio()
1578 reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits; in o2hb_init_region_params()
/linux-4.4.14/drivers/usb/storage/
Dscsiglue.c126 max_sectors = PAGE_CACHE_SIZE >> 9; in slave_configure()
/linux-4.4.14/fs/pstore/
Dinode.c423 sb->s_blocksize = PAGE_CACHE_SIZE; in pstore_fill_super()
/linux-4.4.14/drivers/nvdimm/
Dpmem.c95 pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); in pmem_rw_page()
/linux-4.4.14/arch/s390/hypfs/
Dinode.c281 sb->s_blocksize = PAGE_CACHE_SIZE; in hypfs_fill_super()
/linux-4.4.14/fs/ocfs2/dlmfs/
Ddlmfs.c574 sb->s_blocksize = PAGE_CACHE_SIZE; in dlmfs_fill_super()
/linux-4.4.14/drivers/misc/ibmasm/
Dibmasmfs.c119 sb->s_blocksize = PAGE_CACHE_SIZE; in ibmasmfs_fill_super()
/linux-4.4.14/drivers/block/aoe/
Daoeblk.c400 q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE; in aoeblk_gdalloc()
/linux-4.4.14/drivers/block/
Dbrd.c374 int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector); in brd_rw_page()
/linux-4.4.14/net/sunrpc/auth_gss/
Dgss_krb5_wrap.c84 & (PAGE_CACHE_SIZE - 1); in gss_krb5_remove_padding()
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/socklnd/
Dsocklnd_lib.c286 (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) in ksocknal_lib_kiov_vmap()
/linux-4.4.14/drivers/staging/lustre/lustre/mdc/
Dmdc_request.c1000 ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); in mdc_readpage()
1003 PAGE_CACHE_SIZE * op_data->op_npages, in mdc_readpage()
1035 PAGE_CACHE_SIZE * op_data->op_npages); in mdc_readpage()
