/linux-4.1.27/fs/ocfs2/ |
D | ocfs2.h |
    712  u32 clusters) in ocfs2_clusters_to_blocks() argument
    717  return (u64)clusters << c_to_b_bits; in ocfs2_clusters_to_blocks()
    733  unsigned int clusters; in ocfs2_clusters_for_bytes() local
    737  clusters = (unsigned int)(bytes >> cl_bits); in ocfs2_clusters_for_bytes()
    739  return clusters; in ocfs2_clusters_for_bytes()
    746  unsigned int clusters; in ocfs2_bytes_to_clusters() local
    748  clusters = (unsigned int)(bytes >> cl_bits); in ocfs2_bytes_to_clusters()
    749  return clusters; in ocfs2_bytes_to_clusters()
    760  u32 clusters) in ocfs2_clusters_to_bytes() argument
    762  return (u64)clusters << OCFS2_SB(sb)->s_clustersize_bits; in ocfs2_clusters_to_bytes()
    [all …]
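
The ocfs2.h hits above show that the cluster/block/byte conversion helpers reduce to bit shifts by per-superblock shift counts (c_to_b_bits, s_clustersize_bits). A minimal user-space sketch of that arithmetic, assuming a hypothetical 4 KiB block / 32 KiB cluster geometry and hand-rolled floor/ceil variants (an illustration of the pattern, not the ocfs2 functions themselves):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical geometry: 4 KiB blocks and 32 KiB clusters.  In ocfs2 the
 * shift counts live in the in-memory superblock (s_clustersize_bits,
 * c_to_b_bits); here they are fixed constants for illustration only.
 */
#define BLOCKSIZE_BITS    12	/* 4096-byte blocks */
#define CLUSTERSIZE_BITS  15	/* 32768-byte clusters */
#define C_TO_B_BITS       (CLUSTERSIZE_BITS - BLOCKSIZE_BITS)

static uint64_t clusters_to_blocks(uint32_t clusters)
{
	/* widen before shifting so large cluster counts cannot overflow */
	return (uint64_t)clusters << C_TO_B_BITS;
}

static uint64_t clusters_to_bytes(uint32_t clusters)
{
	return (uint64_t)clusters << CLUSTERSIZE_BITS;
}

static uint32_t bytes_to_clusters(uint64_t bytes)
{
	/* truncating conversion: a trailing partial cluster is dropped */
	return (uint32_t)(bytes >> CLUSTERSIZE_BITS);
}

static uint32_t clusters_for_bytes(uint64_t bytes)
{
	/* rounding-up variant: a partial cluster still occupies a whole one */
	return (uint32_t)((bytes + (1ULL << CLUSTERSIZE_BITS) - 1) >> CLUSTERSIZE_BITS);
}

int main(void)
{
	printf("100 clusters = %llu blocks = %llu bytes\n",
	       (unsigned long long)clusters_to_blocks(100),
	       (unsigned long long)clusters_to_bytes(100));
	printf("1 MiB + 1 byte = %u clusters (floor), %u clusters (ceil)\n",
	       (unsigned)bytes_to_clusters((1 << 20) + 1),
	       (unsigned)clusters_for_bytes((1 << 20) + 1));
	return 0;
}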
|
D | resize.c |
    177  static int update_backups(struct inode * inode, u32 clusters, char *data) in update_backups() argument
    190  if (cluster > clusters) in update_backups()
    220  u32 clusters = 0; in ocfs2_update_super_and_backups() local
    238  clusters = le32_to_cpu(super_di->i_clusters); in ocfs2_update_super_and_backups()
    247  ret = update_backups(inode, clusters, super_bh->b_data); in ocfs2_update_super_and_backups()
    396  else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc) in ocfs2_check_new_group()
    400  le16_to_cpu(gd->bg_bits), input->clusters); in ocfs2_check_new_group()
    433  else if (total_clusters + input->clusters < total_clusters) in ocfs2_verify_group_and_input()
    435  else if (input->clusters > cl_cpg) in ocfs2_verify_group_and_input()
    437  else if (input->frees > input->clusters) in ocfs2_verify_group_and_input()
    [all …]
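
The ocfs2_verify_group_and_input() hits show the sanity checks run on an online-resize group-add request: the added clusters must not wrap the volume's u32 cluster total, must fit within the clusters-per-group limit (cl_cpg), and the advertised free count cannot exceed the group size. A hedged, standalone sketch of those checks; the struct and function names below are illustrative, not the ocfs2 definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the ioctl input describing a new group. */
struct new_group_input {
	uint32_t clusters;	/* clusters in the group being added */
	uint32_t frees;		/* free clusters within that group   */
};

/*
 * Mirror of the checks visible in ocfs2_verify_group_and_input():
 * reject wrap-around of the volume-wide cluster count, groups larger
 * than the per-group limit, and free counts larger than the group.
 */
static int verify_group_input(uint32_t total_clusters,
			      uint32_t clusters_per_group,
			      const struct new_group_input *input)
{
	if (total_clusters + input->clusters < total_clusters)
		return -1;	/* u32 overflow: volume total would wrap */
	if (input->clusters > clusters_per_group)
		return -1;	/* group bigger than cl_cpg allows       */
	if (input->frees > input->clusters)
		return -1;	/* more free clusters than exist         */
	return 0;
}

int main(void)
{
	struct new_group_input in = { .clusters = 32768, .frees = 32760 };

	printf("valid group:   %d\n", verify_group_input(1000000, 32768, &in));
	in.frees = 40000;	/* impossible: frees > clusters */
	printf("invalid group: %d\n", verify_group_input(1000000, 32768, &in));
	return 0;
}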
|
D | ocfs2_trace.h |
    504  unsigned int e_cpos, unsigned int clusters),
    505  TP_ARGS(owner, cpos, len, index, e_cpos, clusters),
    512  __field(unsigned int, clusters)
    520  __entry->clusters = clusters;
    524  __entry->e_cpos, __entry->clusters)
    529  unsigned int clusters, unsigned int depth),
    530  TP_ARGS(ino, new_cpos, clusters, depth),
    534  __field(unsigned int, clusters)
    540  __entry->clusters = clusters;
    545  __entry->clusters, __entry->depth)
    [all …]
|
D | file.c |
    1371  u32 cpos, clusters, extent_len, phys_cpos; in ocfs2_check_range_for_holes() local
    1375  clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos; in ocfs2_check_range_for_holes()
    1377  while (clusters) { in ocfs2_check_range_for_holes()
    1390  if (extent_len > clusters) in ocfs2_check_range_for_holes()
    1391  extent_len = clusters; in ocfs2_check_range_for_holes()
    1393  clusters -= extent_len; in ocfs2_check_range_for_holes()
    1426  u32 cpos, phys_cpos, clusters, alloc_size; in ocfs2_allocate_unwritten_extents() local
    1455  clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len); in ocfs2_allocate_unwritten_extents()
    1456  clusters -= cpos; in ocfs2_allocate_unwritten_extents()
    1458  while (clusters) { in ocfs2_allocate_unwritten_extents()
    [all …]
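
Both file.c hits use the same pattern: convert a byte range into a cluster count with ocfs2_clusters_for_bytes(), then consume it extent by extent, clamping each extent's length to the clusters that remain. A simplified, self-contained version of that walk; the extent-lookup helper and its fake layout are stand-ins, not the ocfs2 API:

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for an extent lookup: reports how many contiguous clusters
 * start at cpos and whether they are allocated.  Real code would consult
 * the inode's extent tree instead.
 */
static uint32_t lookup_extent(uint32_t cpos, int *is_hole)
{
	*is_hole = (cpos / 8) % 2;	/* fake layout: alternating 8-cluster runs */
	return 8 - (cpos % 8);		/* clusters left in the current run        */
}

/* Returns 1 if any cluster in [cpos, cpos + clusters) is a hole. */
static int range_has_holes(uint32_t cpos, uint32_t clusters)
{
	while (clusters) {
		int is_hole;
		uint32_t extent_len = lookup_extent(cpos, &is_hole);

		if (is_hole)
			return 1;
		if (extent_len > clusters)	/* clamp to the remainder */
			extent_len = clusters;

		cpos += extent_len;
		clusters -= extent_len;
	}
	return 0;
}

int main(void)
{
	printf("holes in [0, 8):  %d\n", range_has_holes(0, 8));
	printf("holes in [0, 16): %d\n", range_has_holes(0, 16));
	return 0;
}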
|
D | refcounttree.c |
    2389  u32 clusters, in ocfs2_calc_refcount_meta_credits() argument
    2400  while (clusters) { in ocfs2_calc_refcount_meta_credits()
    2402  cpos, clusters, &rec, in ocfs2_calc_refcount_meta_credits()
    2432  recs_add, (unsigned long long)cpos, clusters, in ocfs2_calc_refcount_meta_credits()
    2437  len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + in ocfs2_calc_refcount_meta_credits()
    2466  if (cpos + clusters < le64_to_cpu(rec.r_cpos) + in ocfs2_calc_refcount_meta_credits()
    2474  clusters -= len; in ocfs2_calc_refcount_meta_credits()
    2517  (unsigned long long)start_cpos, clusters, in ocfs2_calc_refcount_meta_credits()
    2539  u32 clusters, in ocfs2_prepare_refcount_change_for_del() argument
    2576  start_cpos, clusters, in ocfs2_prepare_refcount_change_for_del()
    [all …]
|
D | refcounttree.h | 52 u32 clusters,
|
D | alloc.c |
    109  u32 clusters);
    158  u32 clusters);
    162  u32 clusters);
    196  u32 clusters) in ocfs2_dinode_update_clusters() argument
    201  le32_add_cpu(&di->i_clusters, clusters); in ocfs2_dinode_update_clusters()
    216  u32 clusters) in ocfs2_dinode_extent_map_truncate() argument
    220  ocfs2_extent_map_trunc(inode, clusters); in ocfs2_dinode_extent_map_truncate()
    282  u32 clusters) in ocfs2_xattr_value_update_clusters() argument
    286  le32_add_cpu(&vb->vb_xv->xr_clusters, clusters); in ocfs2_xattr_value_update_clusters()
    328  u32 clusters) in ocfs2_xattr_tree_update_clusters() argument
    [all …]
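
The alloc.c hits are the per-container update_clusters hooks behind ocfs2's extent-tree abstraction: inodes, xattr values and xattr trees each bump their own on-disk cluster counter (via le32_add_cpu) when extents are added or removed. A hedged sketch of that ops-table pattern, using host-endian counters and made-up struct names rather than the on-disk little-endian fields:

#include <stdint.h>
#include <stdio.h>

struct extent_tree;

/* Each container type supplies its own hook for adjusting its cluster count. */
struct extent_tree_ops {
	void (*update_clusters)(struct extent_tree *et, uint32_t clusters);
};

struct extent_tree {
	const struct extent_tree_ops *ops;
	void *object;				/* dinode, xattr value, ... */
};

/* Illustrative containers; the real ones are little-endian on-disk structures. */
struct dinode      { uint32_t i_clusters; };
struct xattr_value { uint32_t xr_clusters; };

static void dinode_update_clusters(struct extent_tree *et, uint32_t clusters)
{
	struct dinode *di = et->object;
	di->i_clusters += clusters;		/* le32_add_cpu() in the kernel */
}

static void xattr_value_update_clusters(struct extent_tree *et, uint32_t clusters)
{
	struct xattr_value *xv = et->object;
	xv->xr_clusters += clusters;
}

static const struct extent_tree_ops dinode_et_ops = {
	.update_clusters = dinode_update_clusters,
};

static const struct extent_tree_ops xattr_value_et_ops = {
	.update_clusters = xattr_value_update_clusters,
};

int main(void)
{
	struct dinode di = { .i_clusters = 100 };
	struct xattr_value xv = { .xr_clusters = 4 };
	struct extent_tree di_et = { .ops = &dinode_et_ops,      .object = &di };
	struct extent_tree xv_et = { .ops = &xattr_value_et_ops, .object = &xv };

	di_et.ops->update_clusters(&di_et, 16);	/* file grew by 16 clusters */
	xv_et.ops->update_clusters(&xv_et, 2);	/* xattr value grew by 2    */
	printf("i_clusters = %u, xr_clusters = %u\n", di.i_clusters, xv.xr_clusters);
	return 0;
}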
|
D | ocfs2_ioctl.h | 60 __u32 clusters; /* Total number of clusters in this group */ member
|
D | suballoc.c |
    354  u64 p_blkno, unsigned int clusters) in ocfs2_bg_discontig_add_extent() argument
    366  rec->e_leaf_clusters = cpu_to_le16(clusters); in ocfs2_bg_discontig_add_extent()
    367  le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); in ocfs2_bg_discontig_add_extent()
    369  clusters * le16_to_cpu(cl->cl_bpc)); in ocfs2_bg_discontig_add_extent()
    528  u32 p_cpos, clusters; in ocfs2_block_group_grow_discontig() local
    547  &clusters); in ocfs2_block_group_grow_discontig()
    555  clusters); in ocfs2_block_group_grow_discontig()
    557  min_bits = clusters; in ocfs2_block_group_grow_discontig()
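
ocfs2_bg_discontig_add_extent() records one physical extent in a discontiguous block group and grows the group's bitmap by cl_bpc bits per added cluster (the truncated line 369 is presumably the matching update of the group's free-bit count). A simplified host-endian sketch of that bookkeeping, with illustrative structure definitions rather than the on-disk ocfs2 ones:

#include <stdint.h>
#include <stdio.h>

struct extent_rec {
	uint64_t e_blkno;		/* first block of the extent */
	uint16_t e_leaf_clusters;	/* length of the extent      */
};

struct block_group {
	uint16_t bg_bits;		/* allocation bits in the group */
	uint16_t bg_free_bits_count;
	uint16_t rec_count;
	struct extent_rec recs[4];
};

/* Append one extent to a discontiguous group; bpc = bitmap bits per cluster. */
static void bg_discontig_add_extent(struct block_group *bg, uint64_t p_blkno,
				    uint16_t clusters, uint16_t bpc)
{
	struct extent_rec *rec = &bg->recs[bg->rec_count++];

	rec->e_blkno = p_blkno;
	rec->e_leaf_clusters = clusters;
	bg->bg_bits += clusters * bpc;		/* bitmap grows with the extent */
	bg->bg_free_bits_count += clusters * bpc;
}

int main(void)
{
	struct block_group bg = { 0 };

	bg_discontig_add_extent(&bg, 4096, 8, 4);	/* 8 clusters, 4 bits each */
	printf("bg_bits = %u, free = %u\n", bg.bg_bits, bg.bg_free_bits_count);
	return 0;
}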
|
D | xattr.c |
    1096  u32 cpos, p_cluster, num_clusters, bpc, clusters; in ocfs2_xattr_get_value_outside() local
    1104  clusters = le32_to_cpu(xv->xr_clusters); in ocfs2_xattr_get_value_outside()
    1109  while (cpos < clusters) { in ocfs2_xattr_get_value_outside()
    1346  u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len); in __ocfs2_xattr_set_value_outside() local
    1352  BUG_ON(clusters > le32_to_cpu(xv->xr_clusters)); in __ocfs2_xattr_set_value_outside()
    1354  while (cpos < clusters) { in __ocfs2_xattr_set_value_outside()
    3961  u32 clusters, in ocfs2_iterate_xattr_buckets() argument
    3967  u32 num_buckets = clusters * bpc; in ocfs2_iterate_xattr_buckets()
    3978  (unsigned long long)blkno, clusters); in ocfs2_iterate_xattr_buckets()
    5892  u32 clusters = le32_to_cpu(xv->xr_clusters); in ocfs2_xattr_value_attach_refcount() local
    [all …]
|
/linux-4.1.27/arch/arm/common/ |
D | mcpm_entry.c |
    346  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN; in __mcpm_cpu_going_down()
    347  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_going_down()
    360  mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN; in __mcpm_cpu_down()
    361  sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu); in __mcpm_cpu_down()
    376  mcpm_sync.clusters[cluster].cluster = state; in __mcpm_outbound_leave_critical()
    377  sync_cache_w(&mcpm_sync.clusters[cluster].cluster); in __mcpm_outbound_leave_critical()
    395  struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster]; in __mcpm_outbound_enter_critical()
    447  sync_cache_r(&mcpm_sync.clusters[cluster].cluster); in __mcpm_cluster_state()
    448  return mcpm_sync.clusters[cluster].cluster; in __mcpm_cluster_state()
    466  mcpm_sync.clusters[i].cluster = CLUSTER_DOWN; in mcpm_sync_init()
    [all …]
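
Every mcpm_entry.c hit follows the same publish/observe pattern: write a per-CPU or per-cluster state word in the shared mcpm_sync array, then perform explicit cache maintenance on it so CPUs in other clusters, possibly running with caches off, see the update. A user-space sketch of the layout and the two helpers; sync_cache_w()/sync_cache_r() are stubbed with compiler barriers here, and the per-CPU entries are flattened to plain ints, unlike the real, cache-line-aligned mcpm_sync_struct:

#include <stdio.h>

#define MAX_NR_CLUSTERS       2
#define MAX_CPUS_PER_CLUSTER  4

enum { CPU_DOWN, CPU_COMING_UP, CPU_UP, CPU_GOING_DOWN };
enum { CLUSTER_DOWN, CLUSTER_UP, CLUSTER_GOING_DOWN };

struct mcpm_sync_struct {
	int cpus[MAX_CPUS_PER_CLUSTER];	/* per-CPU state words */
	int cluster;			/* whole-cluster state */
};

static struct {
	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
} mcpm_sync;

/* Stubs: the kernel cleans/invalidates the cache line covering *p here. */
static void sync_cache_w(volatile int *p) { (void)p; __asm__ volatile("" ::: "memory"); }
static void sync_cache_r(volatile int *p) { (void)p; __asm__ volatile("" ::: "memory"); }

static void cpu_going_down(unsigned cpu, unsigned cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu] = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu]);	/* publish */
}

static int cluster_state(unsigned cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);	/* refetch */
	return mcpm_sync.clusters[cluster].cluster;
}

int main(void)
{
	mcpm_sync.clusters[0].cluster = CLUSTER_UP;
	cpu_going_down(1, 0);
	printf("cluster 0 state: %d\n", cluster_state(0));
	return 0;
}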
|
/linux-4.1.27/Documentation/blockdev/drbd/ |
D | README.txt | 5 clusters and in this context, is a "drop-in" replacement for shared
|
/linux-4.1.27/fs/ntfs/ |
D | lcnalloc.c |
    153  s64 clusters; in ntfs_cluster_alloc() local
    253  clusters = count; in ntfs_cluster_alloc()
    414  if (!--clusters) { in ntfs_cluster_alloc()
    769  (unsigned long long)(count - clusters)); in ntfs_cluster_alloc()
|
/linux-4.1.27/arch/arm/include/asm/ |
D | mcpm.h | 274 struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; member
|
/linux-4.1.27/drivers/net/hippi/ |
D | Kconfig | 12 single-mode). HIPPI networks are commonly used for clusters and to
|
/linux-4.1.27/Documentation/arm/ |
D | cluster-pm-race-avoidance.txt |
    21   In a system containing multiple clusters of CPUs, it is also desirable
    22   to have the ability to turn off entire clusters.
    24   Turning entire clusters off and on is a risky business, because it
    484  clusters of clusters are not supported). The algorithm could be
|
/linux-4.1.27/drivers/block/drbd/ |
D | Kconfig | 21 clusters and in this context, is a "drop-in" replacement for shared
|
/linux-4.1.27/drivers/s390/block/ |
D | Kconfig | 90 Force writes to Storage Class Memory (SCM) to be done in clusters.
|
/linux-4.1.27/Documentation/devicetree/bindings/arm/ |
D | topology.txt | 167 Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters):
|
D | cci.txt | 10 clusters, through memory mapped interface, with a global control register
|
D | idle-states.txt | 580 Example 2 (ARM 32-bit, 8-cpu system, two clusters):
|
/linux-4.1.27/Documentation/filesystems/ |
D | vfat.txt |
    81  usefree -- Use the "free clusters" value stored on FSINFO. It'll
    82  be used to determine number of free clusters without
    85  case. If you are sure the "free clusters" on FSINFO is
|
D | xfs.txt |
    91  clusters and keeps them around on disk. When noikeep is
    92  specified, empty inode clusters are returned to the free
|
D | ceph.txt | 24 clusters, similar to Lustre. Unlike Lustre, however, metadata and
|
D | ext4.txt |
    504  clusters in the file system which will be used
    507  loss. The default is 2% or 4096 clusters,
    509  however it can never exceed number of clusters
|
D | ntfs.txt | 94 clusters. But at present only limited support for highly fragmented files,
|
/linux-4.1.27/arch/arm64/boot/dts/freescale/ |
D | fsl-ls2085a.dtsi | 69 /* We have 4 clusters having 2 Cortex-A57 cores each */
|
/linux-4.1.27/Documentation/cgroups/ |
D | freezer-subsystem.txt | 4 is often used on HPC clusters to schedule access to the cluster as a
|
/linux-4.1.27/net/ipv4/netfilter/ |
D | Kconfig | 319 The CLUSTERIP target allows you to build load-balancing clusters of
|
/linux-4.1.27/arch/arm/ |
D | Kconfig |
    1398  to 2 clusters by default.
    1399  Platforms with 3 or 4 clusters that use MCPM must select this
    1400  option to allow the additional clusters to be managed.
|
/linux-4.1.27/fs/ext4/ |
D | super.c |
    3426  ext4_fsblk_t clusters = ext4_blocks_count(sbi->s_es) >> in ext4_reserve_clusters() local
    3429  if (count >= clusters) in ext4_reserve_clusters()
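
ext4_reserve_clusters() derives the volume's cluster count by shifting the block count and rejects a reservation that reaches or exceeds it; the ext4.txt hit above describes the default reserve as 2% or 4096 clusters. A rough standalone sketch of that arithmetic, reading the documented default as min(2% of the clusters, 4096) and using invented numbers; it is an illustration, not the kernel routine:

#include <stdint.h>
#include <stdio.h>

/* Default reserve per the ext4.txt hit: 2% of the clusters, capped at 4096. */
static uint64_t default_reserved_clusters(uint64_t total_clusters)
{
	uint64_t two_percent = total_clusters / 50;

	return two_percent < 4096 ? two_percent : 4096;
}

/* Mirror of the visible check: the reserve must stay below the total. */
static int reserve_clusters(uint64_t blocks, unsigned cluster_ratio_bits,
			    uint64_t count)
{
	uint64_t clusters = blocks >> cluster_ratio_bits;

	if (count >= clusters)
		return -1;		/* cannot reserve the whole volume */
	return 0;
}

int main(void)
{
	uint64_t blocks = 26214400ULL;		/* 100 GiB of 4 KiB blocks    */
	unsigned ratio_bits = 4;		/* say, 16 blocks per cluster */
	uint64_t clusters = blocks >> ratio_bits;

	printf("default reserve: %llu clusters\n",
	       (unsigned long long)default_reserved_clusters(clusters));
	printf("reserving 4096:  %d\n", reserve_clusters(blocks, ratio_bits, 4096));
	printf("reserving all:   %d\n", reserve_clusters(blocks, ratio_bits, clusters));
	return 0;
}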
|
/linux-4.1.27/net/netfilter/ |
D | Kconfig | 976 This option allows you to build work-load-sharing clusters of
|