chunk_mask        153 drivers/md/dm-exception-store.c 		store->chunk_size = store->chunk_mask = store->chunk_shift = 0;
chunk_mask        185 drivers/md/dm-exception-store.c 	store->chunk_mask = chunk_size - 1;
chunk_mask        122 drivers/md/dm-exception-store.h 	unsigned chunk_mask;
chunk_mask        326 drivers/md/dm-snap-persistent.c 		ps->store->chunk_mask = ps->store->chunk_size - 1;
chunk_mask       1907 drivers/md/dm-snap.c 		(bio->bi_iter.bi_sector & s->store->chunk_mask);
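
The dm snapshot sites above show the basic idiom: the exception store validates that chunk_size is a power of two, so chunk_mask = chunk_size - 1 isolates a sector's offset within its chunk (as in the dm-snap.c hit) while chunk_shift yields the chunk number. A minimal user-space sketch of that arithmetic, with illustrative names rather than kernel code:

    /* Minimal sketch (not kernel code) of the power-of-two mask idiom
     * used by the dm snapshot exception store. Names are illustrative. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    struct toy_store {
            unsigned chunk_size;   /* sectors per chunk, power of two */
            unsigned chunk_mask;   /* chunk_size - 1 */
            unsigned chunk_shift;  /* log2(chunk_size) */
    };

    static void toy_set_chunk_size(struct toy_store *s, unsigned chunk_size)
    {
            /* Only valid for powers of two: n & (n - 1) == 0. */
            assert(chunk_size && !(chunk_size & (chunk_size - 1)));
            s->chunk_size = chunk_size;
            s->chunk_mask = chunk_size - 1;
            s->chunk_shift = __builtin_ctz(chunk_size);
    }

    int main(void)
    {
            struct toy_store s;
            sector_t sector = 10250;

            toy_set_chunk_size(&s, 8);
            /* Offset within the chunk, and the chunk number, via mask/shift. */
            printf("offset %llu, chunk %llu\n",
                   (unsigned long long)(sector & s.chunk_mask),
                   (unsigned long long)(sector >> s.chunk_shift));
            return 0;
    }
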
chunk_mask        579 drivers/md/raid10.c 	sector = r10bio->sector & geo->chunk_mask;
chunk_mask        619 drivers/md/raid10.c 			sector += (geo->chunk_mask + 1);
chunk_mask        661 drivers/md/raid10.c 	offset = sector & geo->chunk_mask;
chunk_mask       1524 drivers/md/raid10.c 	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
chunk_mask       1525 drivers/md/raid10.c 	int chunk_sects = chunk_mask + 1;
chunk_mask       1539 drivers/md/raid10.c 	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
chunk_mask       2913 drivers/md/raid10.c 	sector_t chunk_mask = conf->geo.chunk_mask;
chunk_mask       3012 drivers/md/raid10.c 	    max_sector > (sector_nr | chunk_mask))
chunk_mask       3013 drivers/md/raid10.c 		max_sector = (sector_nr | chunk_mask) + 1;
chunk_mask       3332 drivers/md/raid10.c 		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
chunk_mask       3626 drivers/md/raid10.c 	geo->chunk_mask = chunk - 1;
chunk_mask       4097 drivers/md/raid10.c 	if (mddev->array_sectors & geo.chunk_mask)
chunk_mask       4370 drivers/md/raid10.c 	s = (s | geo->chunk_mask) + 1;
chunk_mask       4484 drivers/md/raid10.c 		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
chunk_mask       4485 drivers/md/raid10.c 					       & conf->prev.chunk_mask);
chunk_mask       4506 drivers/md/raid10.c 		last  = sector_nr | (conf->geo.chunk_mask
chunk_mask       4507 drivers/md/raid10.c 				     & conf->prev.chunk_mask);
chunk_mask         56 drivers/md/raid10.h 		sector_t	chunk_mask;
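
The raid10 hits combine three mask tricks: sector & chunk_mask gives the offset within a chunk; (s | chunk_mask) + 1 rounds past the end of the chunk containing s; and ANDing the masks of the current and previous geometry during reshape yields the smaller of the two power-of-two chunk sizes, so splitting I/O at that finer granularity keeps each piece inside a single chunk of both layouts (one chunk size divides the other). A standalone sketch of the arithmetic, with illustrative values rather than raid10 itself:

    /* Minimal sketch (illustrative) of the chunk_mask arithmetic the
     * raid10 sites above rely on. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    int main(void)
    {
            sector_t geo_mask = 64 - 1;   /* 64-sector chunks */
            sector_t prev_mask = 16 - 1;  /* 16-sector chunks, e.g. pre-reshape */
            sector_t s = 1000;

            /* Offset of a sector within its chunk. */
            printf("offset in chunk: %llu\n",
                   (unsigned long long)(s & geo_mask));

            /* Round s past its chunk: set all low bits, then add 1. */
            printf("next boundary:   %llu\n",
                   (unsigned long long)((s | geo_mask) + 1));  /* 1024 */

            /* ANDing two (2^k - 1) masks yields the smaller one, i.e. the
             * smaller chunk size; boundaries of that size are safe split
             * points for both geometries. */
            sector_t combined = geo_mask & prev_mask;
            printf("combined chunk:  %llu sectors\n",
                   (unsigned long long)(combined + 1));        /* 16 */
            return 0;
    }
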
chunk_mask        297 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	handle &= rx_ring->xsk_umem->chunk_mask;
chunk_mask        456 drivers/net/ethernet/intel/i40e/i40e_xsk.c 	mask = rx_ring->xsk_umem->chunk_mask;
chunk_mask        235 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	mask = rx_ring->xsk_umem->chunk_mask;
chunk_mask        295 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c 	handle &= rx_ring->xsk_umem->chunk_mask;
chunk_mask         48 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask);
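
In the AF_XDP driver sites above, chunk_mask comes from the umem and, for aligned chunks, is the inverted form ~(chunk_size - 1), so handle &= chunk_mask rounds a buffer handle down to its chunk's base address. A minimal sketch, assuming aligned power-of-two chunks (values are illustrative):

    /* Minimal sketch (illustrative): recovering a chunk's base address
     * from an offset into the umem, as the i40e/ixgbe/mlx5 sites above
     * do with handle &= umem->chunk_mask. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t chunk_size = 2048;               /* bytes per umem chunk */
            uint64_t chunk_mask = ~(chunk_size - 1);  /* note: inverted mask */
            uint64_t handle = 3 * 2048 + 100;         /* 100 bytes into chunk 3 */

            printf("chunk base: %llu\n",
                   (unsigned long long)(handle & chunk_mask));  /* 6144 */
            return 0;
    }
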
chunk_mask        272 fs/ext2/dir.c  	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
chunk_mask        297 fs/ext2/dir.c  				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
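
Note the convention flip in ext2: here chunk_mask is defined as ~(ext2_chunk_size(inode) - 1), i.e. the high-bits mask, so ANDing an offset with it rounds down to the start of the directory chunk rather than extracting the in-chunk offset. A small sketch contrasting the two uses of the same bits (illustrative values):

    /* Minimal sketch (illustrative) of the inverted-mask convention in
     * the ext2 directory code above: chunk_mask = ~(chunk_size - 1)
     * rounds an offset *down* to its chunk start with one AND. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned chunk_size = 1024;              /* e.g. dir block size */
            unsigned chunk_mask = ~(chunk_size - 1);

            unsigned offset = 2500;
            printf("chunk start: %u\n", offset & chunk_mask);   /* 2048 */
            printf("in-chunk:    %u\n", offset & ~chunk_mask);  /* 452  */
            return 0;
    }
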
chunk_mask        117 fs/ufs/dir.c   	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
chunk_mask        123 fs/ufs/dir.c   		if (limit & chunk_mask)
chunk_mask        138 fs/ufs/dir.c   		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
chunk_mask        431 fs/ufs/dir.c   	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
chunk_mask        456 fs/ufs/dir.c   				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
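
ufs uses both conventions in the same file: the chunk_mask at dir.c line 117 is the low-bits form, driving alignment checks like limit & chunk_mask and the boundary-crossing test at line 138, while the one at line 431 is the inverted form as in ext2. The boundary test XORs the offsets of a record's first and last bytes; if they differ in any bit above the mask, the record straddles a chunk. A sketch of that test, with a hypothetical helper name:

    /* Minimal sketch (illustrative) of the boundary-crossing test used
     * by the ufs directory code: XOR the first and last byte offsets of
     * a record and check for any differing bit above the chunk mask. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool crosses_chunk(unsigned offs, unsigned rec_len,
                              unsigned chunk_mask)
    {
            /* chunk_mask = chunk_size - 1; high bits identify the chunk. */
            return (((offs + rec_len - 1) ^ offs) & ~chunk_mask) != 0;
    }

    int main(void)
    {
            unsigned chunk_mask = 512 - 1;

            printf("%d\n", crosses_chunk(500, 20, chunk_mask)); /* 1: spans 511/512 */
            printf("%d\n", crosses_chunk(0, 512, chunk_mask));  /* 0: fills chunk 0 */
            return 0;
    }
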
chunk_mask         48 include/net/xdp_sock.h 	u64 chunk_mask;
chunk_mask        393 net/xdp/xdp_umem.c 	umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
chunk_mask        693 net/xdp/xsk.c  			      xs->umem->chunk_mask);
chunk_mask        695 net/xdp/xsk.c  			      xs->umem->chunk_mask);
chunk_mask        707 net/xdp/xsk.c  	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
chunk_mask        708 net/xdp/xsk.c  	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
chunk_mask         12 net/xdp/xsk_queue.c void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
chunk_mask         18 net/xdp/xsk_queue.c 	q->chunk_mask = chunk_mask;
chunk_mask         35 net/xdp/xsk_queue.h 	u64 chunk_mask;
chunk_mask        181 net/xdp/xsk_queue.h 		*addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
chunk_mask        290 net/xdp/xsk_queue.h 	if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
chunk_mask        374 net/xdp/xsk_queue.h void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
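
The XDP core ties the two meanings together: with aligned chunks the queue's chunk_mask is the inverted ~(chunk_size - 1), used to round descriptor addresses down to a chunk base (the xsk_queue.h hit at line 181) and to reject descriptors that straddle chunks (line 290); with unaligned chunks it becomes XSK_UNALIGNED_BUF_ADDR_MASK, which instead keeps the address bits and strips the offset carried in the handle's upper bits. A sketch of both cases; the 48-bit offset shift here is an assumption mirroring that era's headers, not taken from this listing:

    /* Minimal sketch (illustrative) of the two chunk_mask conventions
     * in the XDP core above. Aligned mode: mask off the in-chunk offset
     * to get the chunk base. Unaligned mode: keep the low address bits
     * and strip an offset assumed to live in the upper 16 bits. */
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_UNALIGNED_SHIFT 48  /* assumption, not from the listing */
    #define TOY_UNALIGNED_MASK  ((1ULL << TOY_UNALIGNED_SHIFT) - 1)

    int main(void)
    {
            /* Aligned chunks: inverted power-of-two mask. */
            uint64_t chunk_size = 4096;
            uint64_t aligned_mask = ~(chunk_size - 1);
            uint64_t addr = 5 * 4096 + 64;
            printf("aligned base:   %llu\n",
                   (unsigned long long)(addr & aligned_mask));    /* 20480 */

            /* Unaligned chunks: offset rides in the handle's top bits. */
            uint64_t base = 123456;
            uint64_t handle = base | (64ULL << TOY_UNALIGNED_SHIFT);
            printf("unaligned base: %llu\n",
                   (unsigned long long)(handle & TOY_UNALIGNED_MASK));
            return 0;
    }
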