cluster_base       58 drivers/soc/fsl/dpio/dpio-driver.c 	int cluster_base, cluster_size;
cluster_base       61 drivers/soc/fsl/dpio/dpio-driver.c 		cluster_base = 2;
cluster_base       66 drivers/soc/fsl/dpio/dpio-driver.c 		cluster_base = 0;
cluster_base       73 drivers/soc/fsl/dpio/dpio-driver.c 	return cluster_base + cpu / cluster_size;
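
The drivers/soc/fsl/dpio hits above are the CPU-to-core-cluster mapping used for a DPIO portal's stashing destination: a per-SoC cluster_base and cluster_size are selected (only the base values 2 and 0 are visible in this listing), and the cluster index for a CPU is cluster_base + cpu / cluster_size. The standalone C sketch below only illustrates that arithmetic; the SoC labels and the cluster_size values are assumptions for the example, not taken from the driver.

/*
 * Illustrative userspace sketch (not the kernel code itself) of the
 * cpu -> cluster mapping visible above: each SoC family groups its
 * cores into clusters of cluster_size CPUs, numbered starting at
 * cluster_base.  Only the cluster_base values (2 and 0) appear in
 * the listing; the cluster_size values here are assumptions.
 */
#include <stdio.h>

static int get_cluster_sdest(const char *soc, int cpu)
{
	int cluster_base, cluster_size;

	if (soc[0] == 'A') {		/* hypothetical SoC family A */
		cluster_base = 2;	/* core clusters start at index 2 */
		cluster_size = 4;	/* 4 CPUs per cluster (assumed) */
	} else {			/* hypothetical SoC family B */
		cluster_base = 0;
		cluster_size = 2;	/* 2 CPUs per cluster (assumed) */
	}

	/* integer division groups consecutive CPUs into one cluster */
	return cluster_base + cpu / cluster_size;
}

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++)
		printf("SoC A cpu %d -> cluster %d, SoC B cpu %d -> cluster %d\n",
		       cpu, get_cluster_sdest("A", cpu),
		       cpu, get_cluster_sdest("B", cpu));
	return 0;
}

With these example sizes, CPUs 0-3 of "SoC A" land in cluster 2 and CPUs 4-7 in cluster 3, while on "SoC B" each pair of CPUs gets its own cluster starting from 0.
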
cluster_base      211 fs/xfs/scrub/ialloc.c 	unsigned int			cluster_base)
cluster_base      229 fs/xfs/scrub/ialloc.c 	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);
cluster_base      235 fs/xfs/scrub/ialloc.c 		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
cluster_base      251 fs/xfs/scrub/ialloc.c 	if (imap.im_boffset != 0 && cluster_base != 0) {
cluster_base      252 fs/xfs/scrub/ialloc.c 		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
cluster_base      258 fs/xfs/scrub/ialloc.c 			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
cluster_base      261 fs/xfs/scrub/ialloc.c 					  cluster_base));
cluster_base      297 fs/xfs/scrub/ialloc.c 				cluster_base + cluster_index, dip);
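
The fs/xfs/scrub/ialloc.c hits at lines 211-297 fall inside xchk_iallocbt_check_cluster() (named by the call at ialloc.c:330 below). Here cluster_base is the offset of one inode cluster within a 64-inode chunk, and the hit at ialloc.c:235 accumulates cluster_mask, the set of inobt holemask bits that this cluster overlaps. The sketch below reproduces only that mask computation, assuming the on-disk constants of 64 inodes per chunk and a 16-bit holemask (so 4 inodes per holemask bit); the helper name and the 32-inodes-per-cluster example are made up for illustration.

/*
 * Sketch of the holemask coverage built at ialloc.c:235: each holemask
 * bit stands for 4 consecutive inodes of the 64-inode chunk, so the
 * cluster starting at cluster_base ORs together every bit it touches.
 * Constants mirror the on-disk format; the helper is illustrative,
 * not a copy of the kernel function.
 */
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK	64	/* XFS_INODES_PER_CHUNK */
#define HOLEMASK_BITS		16	/* bits in ir_holemask */
#define INODES_PER_HOLEMASK_BIT	(INODES_PER_CHUNK / HOLEMASK_BITS)

static uint16_t cluster_holemask(unsigned int cluster_base,
				 unsigned int nr_inodes)
{
	uint16_t cluster_mask = 0;
	unsigned int cluster_index;

	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++)
		cluster_mask |= 1U << ((cluster_base + cluster_index) /
				       INODES_PER_HOLEMASK_BIT);
	return cluster_mask;
}

int main(void)
{
	/* example: 32 inodes per cluster, so the chunk spans two clusters */
	unsigned int nr_inodes = 32;
	unsigned int cluster_base;

	for (cluster_base = 0; cluster_base < INODES_PER_CHUNK;
	     cluster_base += nr_inodes)
		printf("cluster_base %2u covers holemask bits 0x%04x\n",
		       cluster_base,
		       (unsigned int)cluster_holemask(cluster_base, nr_inodes));
	return 0;
}

For the 32-inode example this prints 0x00ff for cluster_base 0 and 0xff00 for cluster_base 32, i.e. each cluster claims the half of the holemask that covers its inodes.
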
cluster_base      317 fs/xfs/scrub/ialloc.c 	unsigned int			cluster_base;
cluster_base      327 fs/xfs/scrub/ialloc.c 	for (cluster_base = 0;
cluster_base      328 fs/xfs/scrub/ialloc.c 	     cluster_base < XFS_INODES_PER_CHUNK;
cluster_base      329 fs/xfs/scrub/ialloc.c 	     cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) {
cluster_base      330 fs/xfs/scrub/ialloc.c 		error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
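
The hits at ialloc.c:317-330 show the calling loop: cluster_base sweeps the 64-inode chunk in steps of M_IGEO(bs->sc->mp)->inodes_per_cluster, handing each inode cluster buffer to xchk_iallocbt_check_cluster(). Below is a runnable sketch of the same iteration shape; the cluster-buffer and inode sizes are example values used only to derive an inodes_per_cluster for the demo, not values read from the listing.

/*
 * Sketch of the walk at ialloc.c:327-329: a 64-inode chunk is checked
 * one inode-cluster buffer at a time, so cluster_base advances in
 * steps of inodes_per_cluster (cluster buffer bytes / inode bytes).
 * The byte sizes below are example values.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int inodes_per_chunk = 64;	/* XFS_INODES_PER_CHUNK */
	const unsigned int cluster_bytes = 8192;	/* example cluster buffer size */
	const unsigned int inode_bytes = 512;		/* example inode size */
	const unsigned int inodes_per_cluster = cluster_bytes / inode_bytes;
	unsigned int cluster_base;

	/* same shape as the for loop in the listing above */
	for (cluster_base = 0;
	     cluster_base < inodes_per_chunk;
	     cluster_base += inodes_per_cluster)
		printf("check cluster covering chunk offsets %u..%u\n",
		       cluster_base, cluster_base + inodes_per_cluster - 1);
	return 0;
}

With these example sizes the loop visits cluster_base 0, 16, 32 and 48, checking four cluster buffers per inode chunk; a larger cluster buffer or larger inodes simply changes the stride.
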