zone              547 arch/ia64/include/asm/pgtable.h     extern void memmap_init (unsigned long size, int nid, unsigned long zone,
zone              491 arch/ia64/mm/init.c 	unsigned long zone;
zone              520 arch/ia64/mm/init.c 				 args->nid, args->zone, page_to_pfn(map_start),
zone              526 arch/ia64/mm/init.c memmap_init (unsigned long size, int nid, unsigned long zone,
zone              530 arch/ia64/mm/init.c 		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
zone              540 arch/ia64/mm/init.c 		args.zone = zone;
zone               43 arch/m68k/mm/mcfmmu.c 	enum zone_type zone;
zone               84 arch/m68k/mm/mcfmmu.c 	for (zone = 0; zone < MAX_NR_ZONES; zone++)
zone               85 arch/m68k/mm/mcfmmu.c 		zones_size[zone] = 0x0;
zone              199 arch/s390/include/asm/ap.h 	unsigned int zone  : 8;	/* zone info */
zone              256 arch/s390/mm/page-states.c 	struct zone *zone;
zone              262 arch/s390/mm/page-states.c 	for_each_populated_zone(zone) {
zone              263 arch/s390/mm/page-states.c 		spin_lock_irqsave(&zone->lock, flags);
zone              265 arch/s390/mm/page-states.c 			list_for_each(l, &zone->free_area[order].free_list[t]) {
zone              273 arch/s390/mm/page-states.c 		spin_unlock_irqrestore(&zone->lock, flags);
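
The page-states.c lines above show the canonical pattern for visiting every free page in the system: take each populated zone's lock, then walk the buddy free lists for every order and migratetype. A condensed kernel-style sketch of that loop follows; it assumes the usual mm headers, writes the elided order/migratetype double loop with the for_each_migratetype_order() helper, and is illustrative rather than standalone-compilable.

        struct zone *zone;
        struct list_head *l;
        unsigned long flags;
        unsigned int order, t;

        for_each_populated_zone(zone) {
                spin_lock_irqsave(&zone->lock, flags);
                for_each_migratetype_order(order, t)
                        list_for_each(l, &zone->free_area[order].free_list[t]) {
                                struct page *page = list_entry(l, struct page, lru);
                                /* ... mark or inspect this free 2^order block ... */
                        }
                spin_unlock_irqrestore(&zone->lock, flags);
        }
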
zone              110 arch/x86/mm/highmem_32.c 	struct zone *zone;
zone              118 arch/x86/mm/highmem_32.c 	for_each_zone(zone) {
zone              121 arch/x86/mm/highmem_32.c 		if (!is_highmem(zone))
zone              124 arch/x86/mm/highmem_32.c 		zone_start_pfn = zone->zone_start_pfn;
zone              125 arch/x86/mm/highmem_32.c 		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
zone              127 arch/x86/mm/highmem_32.c 		nid = zone_to_nid(zone);
zone              129 arch/x86/mm/highmem_32.c 				zone->name, nid, zone_start_pfn, zone_end_pfn);
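
For contrast, the highmem_32.c walk above visits every zone (populated or not), skips the non-highmem ones, and derives the PFN span and owning node. A kernel-style sketch of the same traversal, again assuming the mm headers; the printk format is illustrative:

        struct zone *zone;

        for_each_zone(zone) {
                unsigned long start_pfn, end_pfn;

                if (!is_highmem(zone))
                        continue;
                start_pfn = zone->zone_start_pfn;
                end_pfn = start_pfn + zone->spanned_pages;
                printk(KERN_INFO "%s on node %d: pfn %lu-%lu\n",
                       zone->name, zone_to_nid(zone), start_pfn, end_pfn);
        }
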
zone              367 drivers/base/memory.c 		struct zone *default_zone)
zone              369 drivers/base/memory.c 	struct zone *zone;
zone              371 drivers/base/memory.c 	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
zone              372 drivers/base/memory.c 	if (zone != default_zone) {
zone              374 drivers/base/memory.c 		strcat(buf, zone->name);
zone              385 drivers/base/memory.c 	struct zone *default_zone;
zone               43 drivers/block/null_blk_zoned.c 		struct blk_zone *zone = &dev->zones[i];
zone               45 drivers/block/null_blk_zoned.c 		zone->start = sector;
zone               46 drivers/block/null_blk_zoned.c 		zone->len = dev->zone_size_sects;
zone               47 drivers/block/null_blk_zoned.c 		zone->wp = zone->start + zone->len;
zone               48 drivers/block/null_blk_zoned.c 		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
zone               49 drivers/block/null_blk_zoned.c 		zone->cond = BLK_ZONE_COND_NOT_WP;
zone               55 drivers/block/null_blk_zoned.c 		struct blk_zone *zone = &dev->zones[i];
zone               57 drivers/block/null_blk_zoned.c 		zone->start = zone->wp = sector;
zone               58 drivers/block/null_blk_zoned.c 		zone->len = dev->zone_size_sects;
zone               59 drivers/block/null_blk_zoned.c 		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone               60 drivers/block/null_blk_zoned.c 		zone->cond = BLK_ZONE_COND_EMPTY;
zone               96 drivers/block/null_blk_zoned.c 	struct blk_zone *zone = &dev->zones[zno];
zone               98 drivers/block/null_blk_zoned.c 	switch (zone->cond) {
zone              106 drivers/block/null_blk_zoned.c 		if (sector != zone->wp)
zone              109 drivers/block/null_blk_zoned.c 		if (zone->cond == BLK_ZONE_COND_EMPTY)
zone              110 drivers/block/null_blk_zoned.c 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
zone              112 drivers/block/null_blk_zoned.c 		zone->wp += nr_sectors;
zone              113 drivers/block/null_blk_zoned.c 		if (zone->wp == zone->start + zone->len)
zone              114 drivers/block/null_blk_zoned.c 			zone->cond = BLK_ZONE_COND_FULL;
zone              129 drivers/block/null_blk_zoned.c 	struct blk_zone *zone = &dev->zones[zno];
zone              135 drivers/block/null_blk_zoned.c 			if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
zone              137 drivers/block/null_blk_zoned.c 			zone[i].cond = BLK_ZONE_COND_EMPTY;
zone              138 drivers/block/null_blk_zoned.c 			zone[i].wp = zone[i].start;
zone              142 drivers/block/null_blk_zoned.c 		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
zone              145 drivers/block/null_blk_zoned.c 		zone->cond = BLK_ZONE_COND_EMPTY;
zone              146 drivers/block/null_blk_zoned.c 		zone->wp = zone->start;
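
The null_blk lines above sketch a complete zone state machine: conventional zones carry no write pointer (wp parked at start + len, condition NOT_WP), sequential zones start EMPTY with wp at start, a write is accepted only at the wp and flips the zone through IMP_OPEN to FULL, and a reset rewinds wp to start. The following self-contained userspace model reproduces that logic; the enum and field names deliberately mirror struct blk_zone, but none of this is kernel code.

        /* Userspace model of the null_blk zone state machine shown above.
         * Compile with: cc -o zones zones.c */
        #include <assert.h>
        #include <stdio.h>

        enum cond { EMPTY, IMP_OPEN, FULL, NOT_WP };
        enum type { CONVENTIONAL, SEQWRITE_REQ };

        struct zone {
                unsigned long long start, len, wp;
                enum type type;
                enum cond cond;
        };

        static int zone_write(struct zone *z, unsigned long long sector,
                              unsigned long long nr_sectors)
        {
                if (z->type == CONVENTIONAL)
                        return 0;               /* no write pointer to honor */
                if (z->cond == FULL || sector != z->wp)
                        return -1;              /* must append at the write pointer */
                if (z->cond == EMPTY)
                        z->cond = IMP_OPEN;
                z->wp += nr_sectors;
                if (z->wp == z->start + z->len)
                        z->cond = FULL;
                return 0;
        }

        static void zone_reset(struct zone *z)
        {
                if (z->type == CONVENTIONAL)
                        return;                 /* conventional zones are never reset */
                z->cond = EMPTY;
                z->wp = z->start;
        }

        int main(void)
        {
                struct zone z = { 0, 8, 0, SEQWRITE_REQ, EMPTY };

                assert(zone_write(&z, 0, 4) == 0 && z.cond == IMP_OPEN);
                assert(zone_write(&z, 2, 1) < 0);       /* not at wp: rejected */
                assert(zone_write(&z, 4, 4) == 0 && z.cond == FULL);
                zone_reset(&z);
                assert(z.cond == EMPTY && z.wp == z.start);
                printf("zone model ok\n");
                return 0;
        }
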
zone             1060 drivers/block/pktcdvd.c static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
zone             1065 drivers/block/pktcdvd.c 		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
zone             1067 drivers/block/pktcdvd.c 			if (pkt->sector != zone)
zone             1166 drivers/block/pktcdvd.c 	sector_t zone = 0; /* Suppress gcc warning */
zone             1191 drivers/block/pktcdvd.c 		zone = get_zone(bio->bi_iter.bi_sector, pd);
zone             1193 drivers/block/pktcdvd.c 			if (p->sector == zone) {
zone             1215 drivers/block/pktcdvd.c 	pkt = pkt_get_packet_data(pd, zone);
zone             1217 drivers/block/pktcdvd.c 	pd->current_sector = zone + pd->settings.size;
zone             1218 drivers/block/pktcdvd.c 	pkt->sector = zone;
zone             1227 drivers/block/pktcdvd.c 	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
zone             1228 drivers/block/pktcdvd.c 	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
zone             1232 drivers/block/pktcdvd.c 		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
zone             2350 drivers/block/pktcdvd.c 	sector_t zone;
zone             2355 drivers/block/pktcdvd.c 	zone = get_zone(bio->bi_iter.bi_sector, pd);
zone             2364 drivers/block/pktcdvd.c 		if (pkt->sector == zone) {
zone             2469 drivers/block/pktcdvd.c 		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
zone             2472 drivers/block/pktcdvd.c 		if (last_zone != zone) {
zone             2473 drivers/block/pktcdvd.c 			BUG_ON(last_zone != zone + pd->settings.size);
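
Throughout pktcdvd a "zone" is simply the packet-aligned region that contains a sector, which is why the BUG_ON above can assume consecutive zones differ by exactly pd->settings.size. A minimal model of that mapping, assuming a power-of-two packet size and ignoring the session-start offset the driver also folds in:

        /* Model of pktcdvd's sector-to-zone rounding (assumption: the
         * packet size in sectors is a power of two). */
        #include <stdio.h>

        typedef unsigned long long sector_t;

        static sector_t get_zone(sector_t sector, sector_t packet_sectors)
        {
                return sector & ~(packet_sectors - 1);  /* round down to packet boundary */
        }

        int main(void)
        {
                /* 64 KiB packets = 128 sectors: sectors 128..255 share zone 128 */
                printf("%llu %llu\n", get_zone(130, 128), get_zone(255, 128));
                return 0;
        }
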
zone             1176 drivers/gpu/drm/bridge/sil-sii8620.c 			u8 zone;
zone             1202 drivers/gpu/drm/bridge/sil-sii8620.c 			REG_MHL3_TX_ZONE_CTL, clk_spec[i].zone);
zone               81 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone =
zone               85 drivers/gpu/drm/ttm/ttm_memory.c 		zone->name, (unsigned long long)zone->used_mem >> 10);
zone               86 drivers/gpu/drm/ttm/ttm_memory.c 	kfree(zone);
zone               93 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone =
zone               97 drivers/gpu/drm/ttm/ttm_memory.c 	spin_lock(&zone->glob->lock);
zone               99 drivers/gpu/drm/ttm/ttm_memory.c 		val = zone->zone_mem;
zone              101 drivers/gpu/drm/ttm/ttm_memory.c 		val = zone->emer_mem;
zone              103 drivers/gpu/drm/ttm/ttm_memory.c 		val = zone->max_mem;
zone              105 drivers/gpu/drm/ttm/ttm_memory.c 		val = zone->swap_limit;
zone              107 drivers/gpu/drm/ttm/ttm_memory.c 		val = zone->used_mem;
zone              108 drivers/gpu/drm/ttm/ttm_memory.c 	spin_unlock(&zone->glob->lock);
zone              121 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone =
zone              134 drivers/gpu/drm/ttm/ttm_memory.c 	spin_lock(&zone->glob->lock);
zone              135 drivers/gpu/drm/ttm/ttm_memory.c 	if (val64 > zone->zone_mem)
zone              136 drivers/gpu/drm/ttm/ttm_memory.c 		val64 = zone->zone_mem;
zone              138 drivers/gpu/drm/ttm/ttm_memory.c 		zone->emer_mem = val64;
zone              139 drivers/gpu/drm/ttm/ttm_memory.c 		if (zone->max_mem > val64)
zone              140 drivers/gpu/drm/ttm/ttm_memory.c 			zone->max_mem = val64;
zone              142 drivers/gpu/drm/ttm/ttm_memory.c 		zone->max_mem = val64;
zone              143 drivers/gpu/drm/ttm/ttm_memory.c 		if (zone->emer_mem < val64)
zone              144 drivers/gpu/drm/ttm/ttm_memory.c 			zone->emer_mem = val64;
zone              146 drivers/gpu/drm/ttm/ttm_memory.c 		zone->swap_limit = val64;
zone              147 drivers/gpu/drm/ttm/ttm_memory.c 	spin_unlock(&zone->glob->lock);
zone              149 drivers/gpu/drm/ttm/ttm_memory.c 	ttm_check_swapping(zone->glob);
zone              241 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone;
zone              245 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zones[i];
zone              248 drivers/gpu/drm/ttm/ttm_memory.c 			target = zone->swap_limit;
zone              250 drivers/gpu/drm/ttm/ttm_memory.c 			target = zone->emer_mem;
zone              252 drivers/gpu/drm/ttm/ttm_memory.c 			target = zone->max_mem;
zone              256 drivers/gpu/drm/ttm/ttm_memory.c 		if (zone->used_mem > target)
zone              302 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
zone              306 drivers/gpu/drm/ttm/ttm_memory.c 	if (unlikely(!zone))
zone              312 drivers/gpu/drm/ttm/ttm_memory.c 	zone->name = "kernel";
zone              313 drivers/gpu/drm/ttm/ttm_memory.c 	zone->zone_mem = mem;
zone              314 drivers/gpu/drm/ttm/ttm_memory.c 	zone->max_mem = mem >> 1;
zone              315 drivers/gpu/drm/ttm/ttm_memory.c 	zone->emer_mem = (mem >> 1) + (mem >> 2);
zone              316 drivers/gpu/drm/ttm/ttm_memory.c 	zone->swap_limit = zone->max_mem - (mem >> 3);
zone              317 drivers/gpu/drm/ttm/ttm_memory.c 	zone->used_mem = 0;
zone              318 drivers/gpu/drm/ttm/ttm_memory.c 	zone->glob = glob;
zone              319 drivers/gpu/drm/ttm/ttm_memory.c 	glob->zone_kernel = zone;
zone              321 drivers/gpu/drm/ttm/ttm_memory.c 		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
zone              323 drivers/gpu/drm/ttm/ttm_memory.c 		kobject_put(&zone->kobj);
zone              326 drivers/gpu/drm/ttm/ttm_memory.c 	glob->zones[glob->num_zones++] = zone;
zone              334 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone;
zone              341 drivers/gpu/drm/ttm/ttm_memory.c 	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
zone              342 drivers/gpu/drm/ttm/ttm_memory.c 	if (unlikely(!zone))
zone              348 drivers/gpu/drm/ttm/ttm_memory.c 	zone->name = "highmem";
zone              349 drivers/gpu/drm/ttm/ttm_memory.c 	zone->zone_mem = mem;
zone              350 drivers/gpu/drm/ttm/ttm_memory.c 	zone->max_mem = mem >> 1;
zone              351 drivers/gpu/drm/ttm/ttm_memory.c 	zone->emer_mem = (mem >> 1) + (mem >> 2);
zone              352 drivers/gpu/drm/ttm/ttm_memory.c 	zone->swap_limit = zone->max_mem - (mem >> 3);
zone              353 drivers/gpu/drm/ttm/ttm_memory.c 	zone->used_mem = 0;
zone              354 drivers/gpu/drm/ttm/ttm_memory.c 	zone->glob = glob;
zone              355 drivers/gpu/drm/ttm/ttm_memory.c 	glob->zone_highmem = zone;
zone              357 drivers/gpu/drm/ttm/ttm_memory.c 		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
zone              358 drivers/gpu/drm/ttm/ttm_memory.c 		zone->name);
zone              360 drivers/gpu/drm/ttm/ttm_memory.c 		kobject_put(&zone->kobj);
zone              363 drivers/gpu/drm/ttm/ttm_memory.c 	glob->zones[glob->num_zones++] = zone;
zone              370 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
zone              374 drivers/gpu/drm/ttm/ttm_memory.c 	if (unlikely(!zone))
zone              385 drivers/gpu/drm/ttm/ttm_memory.c 		kfree(zone);
zone              396 drivers/gpu/drm/ttm/ttm_memory.c 	zone->name = "dma32";
zone              397 drivers/gpu/drm/ttm/ttm_memory.c 	zone->zone_mem = mem;
zone              398 drivers/gpu/drm/ttm/ttm_memory.c 	zone->max_mem = mem >> 1;
zone              399 drivers/gpu/drm/ttm/ttm_memory.c 	zone->emer_mem = (mem >> 1) + (mem >> 2);
zone              400 drivers/gpu/drm/ttm/ttm_memory.c 	zone->swap_limit = zone->max_mem - (mem >> 3);
zone              401 drivers/gpu/drm/ttm/ttm_memory.c 	zone->used_mem = 0;
zone              402 drivers/gpu/drm/ttm/ttm_memory.c 	zone->glob = glob;
zone              403 drivers/gpu/drm/ttm/ttm_memory.c 	glob->zone_dma32 = zone;
zone              405 drivers/gpu/drm/ttm/ttm_memory.c 		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
zone              407 drivers/gpu/drm/ttm/ttm_memory.c 		kobject_put(&zone->kobj);
zone              410 drivers/gpu/drm/ttm/ttm_memory.c 	glob->zones[glob->num_zones++] = zone;
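
All three TTM zones (kernel, highmem, dma32) are initialized above with the same threshold split of the zone's total memory: max_mem is one half, emer_mem three quarters, and swap_limit is max_mem minus one eighth, i.e. three eighths. The split as plain, standalone arithmetic:

        /* The threshold split every TTM zone above uses. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long long mem = 1ULL << 30;    /* example: a 1 GiB zone */
                unsigned long long max_mem    = mem >> 1;               /* 1/2 */
                unsigned long long emer_mem   = (mem >> 1) + (mem >> 2);/* 3/4 */
                unsigned long long swap_limit = max_mem - (mem >> 3);   /* 3/8 */

                printf("max %llu emer %llu swap %llu\n",
                       max_mem, emer_mem, swap_limit);
                return 0;
        }
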
zone              420 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone;
zone              450 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zones[i];
zone              452 drivers/gpu/drm/ttm/ttm_memory.c 			zone->name, (unsigned long long)zone->max_mem >> 10);
zone              464 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone;
zone              475 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zones[i];
zone              476 drivers/gpu/drm/ttm/ttm_memory.c 		kobject_del(&zone->kobj);
zone              477 drivers/gpu/drm/ttm/ttm_memory.c 		kobject_put(&zone->kobj);
zone              488 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone;
zone              492 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zones[i];
zone              493 drivers/gpu/drm/ttm/ttm_memory.c 		if (zone->used_mem > zone->swap_limit) {
zone              511 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone;
zone              515 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zones[i];
zone              516 drivers/gpu/drm/ttm/ttm_memory.c 		if (single_zone && zone != single_zone)
zone              518 drivers/gpu/drm/ttm/ttm_memory.c 		zone->used_mem -= amount;
zone              566 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone;
zone              570 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zones[i];
zone              571 drivers/gpu/drm/ttm/ttm_memory.c 		if (single_zone && zone != single_zone)
zone              575 drivers/gpu/drm/ttm/ttm_memory.c 			zone->emer_mem : zone->max_mem;
zone              577 drivers/gpu/drm/ttm/ttm_memory.c 		if (zone->used_mem > limit)
zone              583 drivers/gpu/drm/ttm/ttm_memory.c 			zone = glob->zones[i];
zone              584 drivers/gpu/drm/ttm/ttm_memory.c 			if (single_zone && zone != single_zone)
zone              586 drivers/gpu/drm/ttm/ttm_memory.c 			zone->used_mem += amount;
zone              636 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone = NULL;
zone              645 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zone_highmem;
zone              648 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zone_kernel;
zone              650 drivers/gpu/drm/ttm/ttm_memory.c 	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
zone              656 drivers/gpu/drm/ttm/ttm_memory.c 	struct ttm_mem_zone *zone = NULL;
zone              660 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zone_highmem;
zone              663 drivers/gpu/drm/ttm/ttm_memory.c 		zone = glob->zone_kernel;
zone              665 drivers/gpu/drm/ttm/ttm_memory.c 	ttm_mem_global_free_zone(glob, zone, size);
zone             1553 drivers/hwmon/dme1737.c static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_channels_temp, S_IRUGO, \
zone             1555 drivers/hwmon/dme1737.c static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp_hyst, S_IRUGO, \
zone             1557 drivers/hwmon/dme1737.c static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp, S_IRUGO, \
zone             1559 drivers/hwmon/dme1737.c static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point2_temp, S_IRUGO, \
zone             1561 drivers/hwmon/dme1737.c static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point3_temp, S_IRUGO, \
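
The dme1737 attributes are generated by a per-index macro that token-pastes the zone number into sysfs attribute names, so zone##ix##_auto_point1_temp expands to zone1_auto_point1_temp for ix = 1. A tiny standalone demonstration of the ## pasting mechanism (the SENSOR_DEVICE_ATTR_2 plumbing itself is hwmon-specific and omitted):

        /* Token pasting as used by the dme1737 attribute macro above:
         * zone##ix##_suffix glues the index into the identifier. */
        #include <stdio.h>

        #define DECLARE_ZONE_POINT(ix) static int zone##ix##_auto_point1_temp = ix
        DECLARE_ZONE_POINT(1);
        DECLARE_ZONE_POINT(2);

        int main(void)
        {
                printf("%d %d\n", zone1_auto_point1_temp, zone2_auto_point1_temp);
                return 0;
        }
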
zone              227 drivers/hwmon/lm85.c static int ZONE_TO_REG(int zone)
zone              232 drivers/hwmon/lm85.c 		if (zone == lm85_zone_map[i])
zone              318 drivers/hwmon/lm85.c 	struct lm85_zone zone[3];
zone              511 drivers/hwmon/lm85.c 			data->zone[i].range = val >> 4;
zone              514 drivers/hwmon/lm85.c 			data->zone[i].limit =
zone              516 drivers/hwmon/lm85.c 			data->zone[i].critical =
zone              522 drivers/hwmon/lm85.c 				data->zone[i].limit -= 64;
zone              523 drivers/hwmon/lm85.c 				data->zone[i].critical -= 64;
zone              534 drivers/hwmon/lm85.c 			data->zone[0].hyst = i >> 4;
zone              535 drivers/hwmon/lm85.c 			data->zone[1].hyst = i & 0x0f;
zone              538 drivers/hwmon/lm85.c 			data->zone[2].hyst = i >> 4;
zone              820 drivers/hwmon/lm85.c 				 (data->zone[nr].range << 4)
zone             1135 drivers/hwmon/lm85.c 	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit) -
zone             1136 drivers/hwmon/lm85.c 		HYST_FROM_REG(data->zone[nr].hyst));
zone             1155 drivers/hwmon/lm85.c 	min = TEMP_FROM_REG(data->zone[nr].limit);
zone             1156 drivers/hwmon/lm85.c 	data->zone[nr].hyst = HYST_TO_REG(min - val);
zone             1159 drivers/hwmon/lm85.c 			(data->zone[0].hyst << 4)
zone             1160 drivers/hwmon/lm85.c 			| data->zone[1].hyst);
zone             1163 drivers/hwmon/lm85.c 			(data->zone[2].hyst << 4));
zone             1175 drivers/hwmon/lm85.c 	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit));
zone             1193 drivers/hwmon/lm85.c 	data->zone[nr].limit = TEMP_TO_REG(val);
zone             1195 drivers/hwmon/lm85.c 		data->zone[nr].limit);
zone             1198 drivers/hwmon/lm85.c 	data->zone[nr].range = RANGE_TO_REG(
zone             1199 drivers/hwmon/lm85.c 		TEMP_FROM_REG(data->zone[nr].max_desired) -
zone             1200 drivers/hwmon/lm85.c 		TEMP_FROM_REG(data->zone[nr].limit));
zone             1202 drivers/hwmon/lm85.c 		((data->zone[nr].range & 0x0f) << 4)
zone             1215 drivers/hwmon/lm85.c 	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit) +
zone             1216 drivers/hwmon/lm85.c 		RANGE_FROM_REG(data->zone[nr].range));
zone             1235 drivers/hwmon/lm85.c 	min = TEMP_FROM_REG(data->zone[nr].limit);
zone             1236 drivers/hwmon/lm85.c 	data->zone[nr].max_desired = TEMP_TO_REG(val);
zone             1237 drivers/hwmon/lm85.c 	data->zone[nr].range = RANGE_TO_REG(
zone             1240 drivers/hwmon/lm85.c 		((data->zone[nr].range & 0x0f) << 4)
zone             1252 drivers/hwmon/lm85.c 	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].critical));
zone             1270 drivers/hwmon/lm85.c 	data->zone[nr].critical = TEMP_TO_REG(val);
zone             1272 drivers/hwmon/lm85.c 		data->zone[nr].critical);
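
lm85 packs two 4-bit per-zone hysteresis values into one register, zone 0 (or 2) in the high nibble and zone 1 in the low nibble, and likewise keeps a zone's range in a register's high nibble. The packing, reduced to standalone C:

        /* Nibble packing used by the lm85 zone registers above. */
        #include <assert.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned char hyst0 = 0x3, hyst1 = 0xa;
                unsigned char reg = (unsigned char)((hyst0 << 4) | hyst1);

                assert((reg >> 4) == hyst0 && (reg & 0x0f) == hyst1);
                printf("packed hysteresis register: 0x%02x\n", reg);
                return 0;
        }
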
zone               67 drivers/hwmon/scpi-hwmon.c 	struct scpi_thermal_zone *zone = dev;
zone               68 drivers/hwmon/scpi-hwmon.c 	struct scpi_sensors *scpi_sensors = zone->scpi_sensors;
zone               70 drivers/hwmon/scpi-hwmon.c 	struct sensor_data *sensor = &scpi_sensors->data[zone->sensor_id];
zone              260 drivers/hwmon/scpi-hwmon.c 		struct scpi_thermal_zone *zone;
zone              265 drivers/hwmon/scpi-hwmon.c 		zone = devm_kzalloc(dev, sizeof(*zone), GFP_KERNEL);
zone              266 drivers/hwmon/scpi-hwmon.c 		if (!zone)
zone              269 drivers/hwmon/scpi-hwmon.c 		zone->sensor_id = i;
zone              270 drivers/hwmon/scpi-hwmon.c 		zone->scpi_sensors = scpi_sensors;
zone              273 drivers/hwmon/scpi-hwmon.c 							 zone,
zone              282 drivers/hwmon/scpi-hwmon.c 			devm_kfree(dev, zone);
zone               57 drivers/iio/light/lm3533-als.c 	atomic_t zone;
zone               86 drivers/iio/light/lm3533-als.c static int _lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone)
zone               99 drivers/iio/light/lm3533-als.c 	*zone = min_t(u8, val, LM3533_ALS_ZONE_MAX);
zone              104 drivers/iio/light/lm3533-als.c static int lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone)
zone              110 drivers/iio/light/lm3533-als.c 		*zone = atomic_read(&als->zone);
zone              112 drivers/iio/light/lm3533-als.c 		ret = _lm3533_als_get_zone(indio_dev, zone);
zone              124 drivers/iio/light/lm3533-als.c static inline u8 lm3533_als_get_target_reg(unsigned channel, unsigned zone)
zone              126 drivers/iio/light/lm3533-als.c 	return LM3533_REG_ALS_TARGET_BASE + 5 * channel + zone;
zone              130 drivers/iio/light/lm3533-als.c 							unsigned zone, u8 *val)
zone              139 drivers/iio/light/lm3533-als.c 	if (zone > LM3533_ALS_ZONE_MAX)
zone              142 drivers/iio/light/lm3533-als.c 	reg = lm3533_als_get_target_reg(channel, zone);
zone              151 drivers/iio/light/lm3533-als.c 							unsigned zone, u8 val)
zone              160 drivers/iio/light/lm3533-als.c 	if (zone > LM3533_ALS_ZONE_MAX)
zone              163 drivers/iio/light/lm3533-als.c 	reg = lm3533_als_get_target_reg(channel, zone);
zone              174 drivers/iio/light/lm3533-als.c 	u8 zone;
zone              178 drivers/iio/light/lm3533-als.c 	ret = lm3533_als_get_zone(indio_dev, &zone);
zone              182 drivers/iio/light/lm3533-als.c 	ret = lm3533_als_get_target(indio_dev, channel, zone, &target);
zone              251 drivers/iio/light/lm3533-als.c 	u8 zone;
zone              255 drivers/iio/light/lm3533-als.c 	ret = _lm3533_als_get_zone(indio_dev, &zone);
zone              259 drivers/iio/light/lm3533-als.c 	atomic_set(&als->zone, zone);
zone              431 drivers/iio/light/lm3533-als.c 	u8 zone;
zone              443 drivers/iio/light/lm3533-als.c 		ret = lm3533_als_get_zone(indio_dev, &zone);
zone              447 drivers/iio/light/lm3533-als.c 		atomic_set(&als->zone, zone);
zone              470 drivers/iio/light/lm3533-als.c 	u8 zone;
zone              473 drivers/iio/light/lm3533-als.c 	ret = lm3533_als_get_zone(indio_dev, &zone);
zone              477 drivers/iio/light/lm3533-als.c 	return scnprintf(buf, PAGE_SIZE, "%u\n", zone);
zone              670 drivers/iio/light/lm3533-als.c static ILLUMINANCE_ATTR_RO(zone);
zone              862 drivers/iio/light/lm3533-als.c 	atomic_set(&als->zone, 0);
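
The lm3533 target-register helper above encodes a fixed layout: five consecutive per-zone registers per channel, addressed as base + 5 * channel + zone, with zone capped at LM3533_ALS_ZONE_MAX. A standalone model of that addressing; the base value below is a placeholder rather than the driver's constant, and the driver rejects out-of-range zones where this model clamps them:

        /* Register layout implied by lm3533_als_get_target_reg() above:
         * five consecutive per-zone registers per channel. */
        #include <stdio.h>

        #define ALS_TARGET_BASE 0x60    /* placeholder, not the driver's constant */
        #define ALS_ZONE_MAX    4

        static unsigned int target_reg(unsigned int channel, unsigned int zone)
        {
                if (zone > ALS_ZONE_MAX)
                        zone = ALS_ZONE_MAX;    /* driver returns -EINVAL; clamped here */
                return ALS_TARGET_BASE + 5 * channel + zone;
        }

        int main(void)
        {
                printf("channel 1, zone 2 -> reg 0x%02x\n", target_reg(1, 2));
                return 0;
        }
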
zone              190 drivers/md/dm-zoned-metadata.c unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
zone              192 drivers/md/dm-zoned-metadata.c 	return ((unsigned int)(zone - zmd->zones));
zone              195 drivers/md/dm-zoned-metadata.c sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
zone              197 drivers/md/dm-zoned-metadata.c 	return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
zone              200 drivers/md/dm-zoned-metadata.c sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
zone              202 drivers/md/dm-zoned-metadata.c 	return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
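
dm-zoned never stores a zone id: dmz_id() recovers it by pointer arithmetic against the zones array, and the start sector or block then falls out of a shift, which presumes power-of-two zone sizes. Modeled in standalone C with an assumed 256 MiB zone of 512-byte sectors:

        /* dm-zoned's id/start conversions above: the zone id is the
         * array index, and starts are id << shift. */
        #include <stdio.h>

        struct dm_zone { int dummy; };

        int main(void)
        {
                struct dm_zone zones[8];
                struct dm_zone *zone = &zones[5];
                unsigned int zone_nr_sectors_shift = 19;  /* 2^19 sectors = 256 MiB */

                unsigned int id = (unsigned int)(zone - zones); /* pointer arithmetic */
                unsigned long long start_sect =
                        (unsigned long long)id << zone_nr_sectors_shift;

                printf("zone %u starts at sector %llu\n", id, start_sect);
                return 0;
        }
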
zone             1092 drivers/md/dm-zoned-metadata.c static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
zone             1104 drivers/md/dm-zoned-metadata.c 	INIT_LIST_HEAD(&zone->link);
zone             1105 drivers/md/dm-zoned-metadata.c 	atomic_set(&zone->refcount, 0);
zone             1106 drivers/md/dm-zoned-metadata.c 	zone->chunk = DMZ_MAP_UNMAPPED;
zone             1109 drivers/md/dm-zoned-metadata.c 		set_bit(DMZ_RND, &zone->flags);
zone             1112 drivers/md/dm-zoned-metadata.c 		set_bit(DMZ_SEQ, &zone->flags);
zone             1117 drivers/md/dm-zoned-metadata.c 		set_bit(DMZ_OFFLINE, &zone->flags);
zone             1119 drivers/md/dm-zoned-metadata.c 		set_bit(DMZ_READ_ONLY, &zone->flags);
zone             1121 drivers/md/dm-zoned-metadata.c 	if (dmz_is_rnd(zone))
zone             1122 drivers/md/dm-zoned-metadata.c 		zone->wp_block = 0;
zone             1124 drivers/md/dm-zoned-metadata.c 		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
zone             1126 drivers/md/dm-zoned-metadata.c 	if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
zone             1128 drivers/md/dm-zoned-metadata.c 		if (dmz_is_rnd(zone)) {
zone             1132 drivers/md/dm-zoned-metadata.c 				zmd->sb_zone = zone;
zone             1162 drivers/md/dm-zoned-metadata.c 	struct dm_zone *zone;
zone             1197 drivers/md/dm-zoned-metadata.c 	zone = zmd->zones;
zone             1212 drivers/md/dm-zoned-metadata.c 			ret = dmz_init_zone(zmd, zone, &blkz[i]);
zone             1216 drivers/md/dm-zoned-metadata.c 			zone++;
zone             1236 drivers/md/dm-zoned-metadata.c static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
zone             1250 drivers/md/dm-zoned-metadata.c 	ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
zone             1257 drivers/md/dm-zoned-metadata.c 			    dmz_id(zmd, zone));
zone             1262 drivers/md/dm-zoned-metadata.c 	clear_bit(DMZ_OFFLINE, &zone->flags);
zone             1263 drivers/md/dm-zoned-metadata.c 	clear_bit(DMZ_READ_ONLY, &zone->flags);
zone             1265 drivers/md/dm-zoned-metadata.c 		set_bit(DMZ_OFFLINE, &zone->flags);
zone             1267 drivers/md/dm-zoned-metadata.c 		set_bit(DMZ_READ_ONLY, &zone->flags);
zone             1269 drivers/md/dm-zoned-metadata.c 	if (dmz_is_seq(zone))
zone             1270 drivers/md/dm-zoned-metadata.c 		zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
zone             1272 drivers/md/dm-zoned-metadata.c 		zone->wp_block = 0;
zone             1282 drivers/md/dm-zoned-metadata.c 				    struct dm_zone *zone)
zone             1287 drivers/md/dm-zoned-metadata.c 	wp = zone->wp_block;
zone             1288 drivers/md/dm-zoned-metadata.c 	ret = dmz_update_zone(zmd, zone);
zone             1293 drivers/md/dm-zoned-metadata.c 		     dmz_id(zmd, zone), zone->wp_block, wp);
zone             1295 drivers/md/dm-zoned-metadata.c 	if (zone->wp_block < wp) {
zone             1296 drivers/md/dm-zoned-metadata.c 		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
zone             1297 drivers/md/dm-zoned-metadata.c 				      wp - zone->wp_block);
zone             1311 drivers/md/dm-zoned-metadata.c static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
zone             1319 drivers/md/dm-zoned-metadata.c 	if (dmz_is_offline(zone) ||
zone             1320 drivers/md/dm-zoned-metadata.c 	    dmz_is_readonly(zone) ||
zone             1321 drivers/md/dm-zoned-metadata.c 	    dmz_is_rnd(zone))
zone             1324 drivers/md/dm-zoned-metadata.c 	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
zone             1328 drivers/md/dm-zoned-metadata.c 					 dmz_start_sect(zmd, zone),
zone             1332 drivers/md/dm-zoned-metadata.c 				    dmz_id(zmd, zone), ret);
zone             1338 drivers/md/dm-zoned-metadata.c 	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
zone             1339 drivers/md/dm-zoned-metadata.c 	zone->wp_block = 0;
zone             1344 drivers/md/dm-zoned-metadata.c static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
zone             1489 drivers/md/dm-zoned-metadata.c static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
zone             1491 drivers/md/dm-zoned-metadata.c 	if (list_empty(&zone->link))
zone             1494 drivers/md/dm-zoned-metadata.c 	list_del_init(&zone->link);
zone             1495 drivers/md/dm-zoned-metadata.c 	if (dmz_is_seq(zone)) {
zone             1497 drivers/md/dm-zoned-metadata.c 		list_add_tail(&zone->link, &zmd->map_seq_list);
zone             1500 drivers/md/dm-zoned-metadata.c 		list_add_tail(&zone->link, &zmd->map_rnd_list);
zone             1508 drivers/md/dm-zoned-metadata.c static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
zone             1510 drivers/md/dm-zoned-metadata.c 	__dmz_lru_zone(zmd, zone);
zone             1511 drivers/md/dm-zoned-metadata.c 	if (zone->bzone)
zone             1512 drivers/md/dm-zoned-metadata.c 		__dmz_lru_zone(zmd, zone->bzone);
zone             1538 drivers/md/dm-zoned-metadata.c int dmz_lock_zone_reclaim(struct dm_zone *zone)
zone             1541 drivers/md/dm-zoned-metadata.c 	if (dmz_is_active(zone))
zone             1544 drivers/md/dm-zoned-metadata.c 	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
zone             1550 drivers/md/dm-zoned-metadata.c void dmz_unlock_zone_reclaim(struct dm_zone *zone)
zone             1552 drivers/md/dm-zoned-metadata.c 	WARN_ON(dmz_is_active(zone));
zone             1553 drivers/md/dm-zoned-metadata.c 	WARN_ON(!dmz_in_reclaim(zone));
zone             1555 drivers/md/dm-zoned-metadata.c 	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
zone             1557 drivers/md/dm-zoned-metadata.c 	wake_up_bit(&zone->flags, DMZ_RECLAIM);
zone             1563 drivers/md/dm-zoned-metadata.c static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
zone             1567 drivers/md/dm-zoned-metadata.c 	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
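
dmz_lock_zone_reclaim()/dmz_unlock_zone_reclaim() above form a try-lock on the DMZ_RECLAIM flag bit: test_and_set_bit() acquires it for exactly one caller, and the release path clears the bit with release semantics and wakes bit-waiters. A userspace analogue with C11 atomics; the kernel's wait_on_bit()/wake_up_bit() sleeping side has no direct stdlib equivalent, so only the acquire/release pair is modeled:

        /* Userspace analogue of the DMZ_RECLAIM flag try-lock above. */
        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_flag reclaim = ATOMIC_FLAG_INIT;

        static int lock_zone_reclaim(void)
        {
                /* like !test_and_set_bit(): true only for the first caller */
                return !atomic_flag_test_and_set_explicit(&reclaim,
                                                          memory_order_acquire);
        }

        static void unlock_zone_reclaim(void)
        {
                /* like clear_bit_unlock(): release the flag */
                atomic_flag_clear_explicit(&reclaim, memory_order_release);
        }

        int main(void)
        {
                printf("%d %d\n", lock_zone_reclaim(), lock_zone_reclaim()); /* 1 0 */
                unlock_zone_reclaim();
                printf("%d\n", lock_zone_reclaim());                         /* 1 */
                return 0;
        }
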
zone             1578 drivers/md/dm-zoned-metadata.c 	struct dm_zone *zone;
zone             1583 drivers/md/dm-zoned-metadata.c 	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
zone             1584 drivers/md/dm-zoned-metadata.c 		if (dmz_is_buf(zone))
zone             1585 drivers/md/dm-zoned-metadata.c 			dzone = zone->bzone;
zone             1587 drivers/md/dm-zoned-metadata.c 			dzone = zone;
zone             1600 drivers/md/dm-zoned-metadata.c 	struct dm_zone *zone;
zone             1605 drivers/md/dm-zoned-metadata.c 	list_for_each_entry(zone, &zmd->map_seq_list, link) {
zone             1606 drivers/md/dm-zoned-metadata.c 		if (!zone->bzone)
zone             1608 drivers/md/dm-zoned-metadata.c 		if (dmz_lock_zone_reclaim(zone))
zone             1609 drivers/md/dm-zoned-metadata.c 			return zone;
zone             1620 drivers/md/dm-zoned-metadata.c 	struct dm_zone *zone;
zone             1632 drivers/md/dm-zoned-metadata.c 		zone = dmz_get_seq_zone_for_reclaim(zmd);
zone             1634 drivers/md/dm-zoned-metadata.c 		zone = dmz_get_rnd_zone_for_reclaim(zmd);
zone             1637 drivers/md/dm-zoned-metadata.c 	return zone;
zone             1801 drivers/md/dm-zoned-metadata.c 	struct dm_zone *zone;
zone             1817 drivers/md/dm-zoned-metadata.c 		zone = list_first_entry(&zmd->reserved_seq_zones_list,
zone             1819 drivers/md/dm-zoned-metadata.c 		list_del_init(&zone->link);
zone             1821 drivers/md/dm-zoned-metadata.c 		return zone;
zone             1824 drivers/md/dm-zoned-metadata.c 	zone = list_first_entry(list, struct dm_zone, link);
zone             1825 drivers/md/dm-zoned-metadata.c 	list_del_init(&zone->link);
zone             1827 drivers/md/dm-zoned-metadata.c 	if (dmz_is_rnd(zone))
zone             1832 drivers/md/dm-zoned-metadata.c 	if (dmz_is_offline(zone)) {
zone             1833 drivers/md/dm-zoned-metadata.c 		dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
zone             1834 drivers/md/dm-zoned-metadata.c 		zone = NULL;
zone             1838 drivers/md/dm-zoned-metadata.c 	return zone;
zone             1845 drivers/md/dm-zoned-metadata.c void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
zone             1848 drivers/md/dm-zoned-metadata.c 	if (dmz_is_seq(zone))
zone             1849 drivers/md/dm-zoned-metadata.c 		dmz_reset_zone(zmd, zone);
zone             1852 drivers/md/dm-zoned-metadata.c 	if (dmz_is_rnd(zone)) {
zone             1853 drivers/md/dm-zoned-metadata.c 		list_add_tail(&zone->link, &zmd->unmap_rnd_list);
zone             1857 drivers/md/dm-zoned-metadata.c 		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
zone             1860 drivers/md/dm-zoned-metadata.c 		list_add_tail(&zone->link, &zmd->unmap_seq_list);
zone             1888 drivers/md/dm-zoned-metadata.c void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
zone             1890 drivers/md/dm-zoned-metadata.c 	unsigned int chunk = zone->chunk;
zone             1898 drivers/md/dm-zoned-metadata.c 	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
zone             1903 drivers/md/dm-zoned-metadata.c 		dzone_id = dmz_id(zmd, zone->bzone);
zone             1904 drivers/md/dm-zoned-metadata.c 		zone->bzone->bzone = NULL;
zone             1905 drivers/md/dm-zoned-metadata.c 		zone->bzone = NULL;
zone             1912 drivers/md/dm-zoned-metadata.c 		if (WARN_ON(zone->bzone)) {
zone             1913 drivers/md/dm-zoned-metadata.c 			zone->bzone->bzone = NULL;
zone             1914 drivers/md/dm-zoned-metadata.c 			zone->bzone = NULL;
zone             1921 drivers/md/dm-zoned-metadata.c 	zone->chunk = DMZ_MAP_UNMAPPED;
zone             1922 drivers/md/dm-zoned-metadata.c 	list_del_init(&zone->link);
zone             1961 drivers/md/dm-zoned-metadata.c 					 struct dm_zone *zone,
zone             1965 drivers/md/dm-zoned-metadata.c 		(sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
zone             2036 drivers/md/dm-zoned-metadata.c int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
zone             2045 drivers/md/dm-zoned-metadata.c 		      dmz_id(zmd, zone), (unsigned long long)chunk_block,
zone             2052 drivers/md/dm-zoned-metadata.c 		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
zone             2071 drivers/md/dm-zoned-metadata.c 	if (likely(zone->weight + n <= zone_nr_blocks))
zone             2072 drivers/md/dm-zoned-metadata.c 		zone->weight += n;
zone             2075 drivers/md/dm-zoned-metadata.c 			     dmz_id(zmd, zone), zone->weight,
zone             2077 drivers/md/dm-zoned-metadata.c 		zone->weight = zone_nr_blocks;
zone             2117 drivers/md/dm-zoned-metadata.c int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
zone             2125 drivers/md/dm-zoned-metadata.c 		      dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);
zone             2131 drivers/md/dm-zoned-metadata.c 		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
zone             2151 drivers/md/dm-zoned-metadata.c 	if (zone->weight >= n)
zone             2152 drivers/md/dm-zoned-metadata.c 		zone->weight -= n;
zone             2155 drivers/md/dm-zoned-metadata.c 			     dmz_id(zmd, zone), zone->weight, n);
zone             2156 drivers/md/dm-zoned-metadata.c 		zone->weight = 0;
zone             2165 drivers/md/dm-zoned-metadata.c static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
zone             2174 drivers/md/dm-zoned-metadata.c 	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
zone             2191 drivers/md/dm-zoned-metadata.c static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
zone             2205 drivers/md/dm-zoned-metadata.c 		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
zone             2234 drivers/md/dm-zoned-metadata.c int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
zone             2239 drivers/md/dm-zoned-metadata.c 	valid = dmz_test_block(zmd, zone, chunk_block);
zone             2244 drivers/md/dm-zoned-metadata.c 	return dmz_to_next_set_block(zmd, zone, chunk_block,
zone             2254 drivers/md/dm-zoned-metadata.c int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
zone             2260 drivers/md/dm-zoned-metadata.c 	ret = dmz_to_next_set_block(zmd, zone, start_block,
zone             2268 drivers/md/dm-zoned-metadata.c 	return dmz_to_next_set_block(zmd, zone, start_block,
zone             2303 drivers/md/dm-zoned-metadata.c static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
zone             2314 drivers/md/dm-zoned-metadata.c 		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
zone             2332 drivers/md/dm-zoned-metadata.c 	zone->weight = n;
zone             2402 drivers/md/dm-zoned-metadata.c 	struct dm_zone *zone;
zone             2444 drivers/md/dm-zoned-metadata.c 		zone = dmz_get(zmd, zid + i);
zone             2445 drivers/md/dm-zoned-metadata.c 		if (!dmz_is_rnd(zone))
zone             2447 drivers/md/dm-zoned-metadata.c 		set_bit(DMZ_META, &zone->flags);
zone             2527 drivers/md/dm-zoned-metadata.c 	struct dm_zone *zone;
zone             2534 drivers/md/dm-zoned-metadata.c 		zone = dmz_get(zmd, i);
zone             2535 drivers/md/dm-zoned-metadata.c 		if (!zone) {
zone             2540 drivers/md/dm-zoned-metadata.c 		wp_block = zone->wp_block;
zone             2542 drivers/md/dm-zoned-metadata.c 		ret = dmz_update_zone(zmd, zone);
zone             2548 drivers/md/dm-zoned-metadata.c 		if (dmz_is_offline(zone)) {
zone             2554 drivers/md/dm-zoned-metadata.c 		if (!dmz_is_seq(zone))
zone             2555 drivers/md/dm-zoned-metadata.c 			zone->wp_block = 0;
zone             2556 drivers/md/dm-zoned-metadata.c 		else if (zone->wp_block != wp_block) {
zone             2558 drivers/md/dm-zoned-metadata.c 				    i, (u64)zone->wp_block, (u64)wp_block);
zone             2559 drivers/md/dm-zoned-metadata.c 			zone->wp_block = wp_block;
zone             2560 drivers/md/dm-zoned-metadata.c 			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
zone             2561 drivers/md/dm-zoned-metadata.c 					      dev->zone_nr_blocks - zone->wp_block);
zone               58 drivers/md/dm-zoned-reclaim.c static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
zone               62 drivers/md/dm-zoned-reclaim.c 	sector_t wp_block = zone->wp_block;
zone               78 drivers/md/dm-zoned-reclaim.c 				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
zone               83 drivers/md/dm-zoned-reclaim.c 			    dmz_id(zmd, zone), (unsigned long long)wp_block,
zone               89 drivers/md/dm-zoned-reclaim.c 	zone->wp_block = block;
zone               21 drivers/md/dm-zoned-target.c 	struct dm_zone		*zone;
zone               87 drivers/md/dm-zoned-target.c 		struct dm_zone *zone = bioctx->zone;
zone               89 drivers/md/dm-zoned-target.c 		if (zone) {
zone               92 drivers/md/dm-zoned-target.c 			    dmz_is_seq(zone))
zone               93 drivers/md/dm-zoned-target.c 				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
zone               94 drivers/md/dm-zoned-target.c 			dmz_deactivate_zone(zone);
zone              117 drivers/md/dm-zoned-target.c static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
zone              130 drivers/md/dm-zoned-target.c 		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
zone              140 drivers/md/dm-zoned-target.c 	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
zone              141 drivers/md/dm-zoned-target.c 		zone->wp_block += nr_blocks;
zone              165 drivers/md/dm-zoned-target.c static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
zone              175 drivers/md/dm-zoned-target.c 	if (!zone) {
zone              182 drivers/md/dm-zoned-target.c 		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
zone              183 drivers/md/dm-zoned-target.c 		      dmz_id(dmz->metadata, zone),
zone              187 drivers/md/dm-zoned-target.c 	bzone = zone->bzone;
zone              190 drivers/md/dm-zoned-target.c 		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
zone              192 drivers/md/dm-zoned-target.c 			ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
zone              198 drivers/md/dm-zoned-target.c 				rzone = zone;
zone              240 drivers/md/dm-zoned-target.c 				   struct dm_zone *zone, struct bio *bio,
zone              245 drivers/md/dm-zoned-target.c 	struct dm_zone *bzone = zone->bzone;
zone              248 drivers/md/dm-zoned-target.c 	if (dmz_is_readonly(zone))
zone              252 drivers/md/dm-zoned-target.c 	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
zone              260 drivers/md/dm-zoned-target.c 	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
zone              273 drivers/md/dm-zoned-target.c 				     struct dm_zone *zone, struct bio *bio,
zone              282 drivers/md/dm-zoned-target.c 	bzone = dmz_get_chunk_buffer(zmd, zone);
zone              299 drivers/md/dm-zoned-target.c 	if (ret == 0 && chunk_block < zone->wp_block)
zone              300 drivers/md/dm-zoned-target.c 		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
zone              308 drivers/md/dm-zoned-target.c static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
zone              314 drivers/md/dm-zoned-target.c 	if (!zone)
zone              319 drivers/md/dm-zoned-target.c 		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
zone              320 drivers/md/dm-zoned-target.c 		      dmz_id(dmz->metadata, zone),
zone              323 drivers/md/dm-zoned-target.c 	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
zone              329 drivers/md/dm-zoned-target.c 		return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
zone              336 drivers/md/dm-zoned-target.c 	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
zone              342 drivers/md/dm-zoned-target.c static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
zone              352 drivers/md/dm-zoned-target.c 	if (!zone)
zone              355 drivers/md/dm-zoned-target.c 	if (dmz_is_readonly(zone))
zone              360 drivers/md/dm-zoned-target.c 		      dmz_id(zmd, zone),
zone              367 drivers/md/dm-zoned-target.c 	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
zone              368 drivers/md/dm-zoned-target.c 		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
zone              369 drivers/md/dm-zoned-target.c 	if (ret == 0 && zone->bzone)
zone              370 drivers/md/dm-zoned-target.c 		ret = dmz_invalidate_blocks(zmd, zone->bzone,
zone              383 drivers/md/dm-zoned-target.c 	struct dm_zone *zone;
zone              405 drivers/md/dm-zoned-target.c 	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
zone              407 drivers/md/dm-zoned-target.c 	if (IS_ERR(zone)) {
zone              408 drivers/md/dm-zoned-target.c 		ret = PTR_ERR(zone);
zone              413 drivers/md/dm-zoned-target.c 	if (zone) {
zone              414 drivers/md/dm-zoned-target.c 		dmz_activate_zone(zone);
zone              415 drivers/md/dm-zoned-target.c 		bioctx->zone = zone;
zone              420 drivers/md/dm-zoned-target.c 		ret = dmz_handle_read(dmz, zone, bio);
zone              423 drivers/md/dm-zoned-target.c 		ret = dmz_handle_write(dmz, zone, bio);
zone              427 drivers/md/dm-zoned-target.c 		ret = dmz_handle_discard(dmz, zone, bio);
zone              439 drivers/md/dm-zoned-target.c 	if (zone)
zone              440 drivers/md/dm-zoned-target.c 		dmz_put_chunk_mapping(zmd, zone);
zone              648 drivers/md/dm-zoned-target.c 	bioctx->zone = NULL;
zone              179 drivers/md/dm-zoned.h unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone);
zone              180 drivers/md/dm-zoned.h sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
zone              181 drivers/md/dm-zoned.h sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
zone              188 drivers/md/dm-zoned.h void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
zone              190 drivers/md/dm-zoned.h void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
zone              192 drivers/md/dm-zoned.h void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
zone              199 drivers/md/dm-zoned.h static inline void dmz_activate_zone(struct dm_zone *zone)
zone              201 drivers/md/dm-zoned.h 	atomic_inc(&zone->refcount);
zone              208 drivers/md/dm-zoned.h static inline void dmz_deactivate_zone(struct dm_zone *zone)
zone              210 drivers/md/dm-zoned.h 	atomic_dec(&zone->refcount);
zone              216 drivers/md/dm-zoned.h static inline bool dmz_is_active(struct dm_zone *zone)
zone              218 drivers/md/dm-zoned.h 	return atomic_read(&zone->refcount);
zone              221 drivers/md/dm-zoned.h int dmz_lock_zone_reclaim(struct dm_zone *zone);
zone              222 drivers/md/dm-zoned.h void dmz_unlock_zone_reclaim(struct dm_zone *zone);
zone              227 drivers/md/dm-zoned.h void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone);
zone              231 drivers/md/dm-zoned.h int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
zone              233 drivers/md/dm-zoned.h int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
zone              235 drivers/md/dm-zoned.h int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
zone              237 drivers/md/dm-zoned.h int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
zone             1227 drivers/md/dm.c 	struct blk_zone *zone;
zone             1238 drivers/md/dm.c 		zone = zones + i;
zone             1239 drivers/md/dm.c 		if (zone->start >= start + ti->len) {
zone             1240 drivers/md/dm.c 			memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
zone             1244 drivers/md/dm.c 		zone->start = zone->start + ti->begin - start;
zone             1245 drivers/md/dm.c 		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
zone             1248 drivers/md/dm.c 		if (zone->cond == BLK_ZONE_COND_FULL)
zone             1249 drivers/md/dm.c 			zone->wp = zone->start + zone->len;
zone             1250 drivers/md/dm.c 		else if (zone->cond == BLK_ZONE_COND_EMPTY)
zone             1251 drivers/md/dm.c 			zone->wp = zone->start;
zone             1253 drivers/md/dm.c 			zone->wp = zone->wp + ti->begin - start;
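
When a dm target forwards a zone report from the underlying device, dm.c shifts every zone start by the target's offset and resynthesizes the write pointer from the zone condition: zone end when FULL, zone start when EMPTY, shifted otherwise. The same transformation in standalone C, with dev_start and ti_begin standing in for the driver's start and ti->begin:

        /* Plain-C model of the zone remapping dm.c does above. */
        #include <stdio.h>

        enum cond { EMPTY, FULL, OTHER };

        struct blk_zone_model {
                unsigned long long start, len, wp;
                enum cond cond;
        };

        static void remap(struct blk_zone_model *z, unsigned long long dev_start,
                          unsigned long long ti_begin)
        {
                z->start += ti_begin - dev_start;
                if (z->cond == FULL)
                        z->wp = z->start + z->len;      /* wp pinned at zone end */
                else if (z->cond == EMPTY)
                        z->wp = z->start;               /* wp pinned at zone start */
                else
                        z->wp += ti_begin - dev_start;  /* shift the real wp */
        }

        int main(void)
        {
                struct blk_zone_model z = { 4096, 1024, 4296, OTHER };

                remap(&z, 0, 2048);     /* target begins at sector 2048 */
                printf("start %llu wp %llu\n", z.start, z.wp);  /* 6144 6344 */
                return 0;
        }
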
zone               85 drivers/md/raid0.c 	struct strip_zone *zone;
zone              189 drivers/md/raid0.c 	zone = &conf->strip_zone[0];
zone              237 drivers/md/raid0.c 	zone->nb_dev = cnt;
zone              238 drivers/md/raid0.c 	zone->zone_end = smallest->sectors * cnt;
zone              240 drivers/md/raid0.c 	curr_zone_end = zone->zone_end;
zone              247 drivers/md/raid0.c 		zone = conf->strip_zone + i;
zone              251 drivers/md/raid0.c 		zone->dev_start = smallest->sectors;
zone              257 drivers/md/raid0.c 			if (rdev->sectors <= zone->dev_start) {
zone              277 drivers/md/raid0.c 		zone->nb_dev = c;
zone              278 drivers/md/raid0.c 		sectors = (smallest->sectors - zone->dev_start) * c;
zone              281 drivers/md/raid0.c 			 zone->nb_dev, (unsigned long long)sectors);
zone              284 drivers/md/raid0.c 		zone->zone_end = curr_zone_end;
zone              326 drivers/md/raid0.c static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
zone              343 drivers/md/raid0.c 		sector_div(chunk, zone->nb_dev << chunksect_bits);
zone              347 drivers/md/raid0.c 		sector_div(chunk, chunk_sects * zone->nb_dev);
zone              355 drivers/md/raid0.c 	return conf->devlist[(zone - conf->strip_zone)*raid_disks
zone              356 drivers/md/raid0.c 			     + sector_div(sector, zone->nb_dev)];
zone              480 drivers/md/raid0.c 	struct strip_zone *zone;
zone              491 drivers/md/raid0.c 	zone = find_zone(conf, &start);
zone              493 drivers/md/raid0.c 	if (bio_end_sector(bio) > zone->zone_end) {
zone              495 drivers/md/raid0.c 			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
zone              500 drivers/md/raid0.c 		end = zone->zone_end;
zone              504 drivers/md/raid0.c 	if (zone != conf->strip_zone)
zone              505 drivers/md/raid0.c 		end = end - zone[-1].zone_end;
zone              508 drivers/md/raid0.c 	stripe_size = zone->nb_dev * mddev->chunk_sectors;
zone              526 drivers/md/raid0.c 	for (disk = 0; disk < zone->nb_dev; disk++) {
zone              549 drivers/md/raid0.c 		rdev = conf->devlist[(zone - conf->strip_zone) *
zone              552 drivers/md/raid0.c 			dev_start + zone->dev_start + rdev->data_offset,
zone              570 drivers/md/raid0.c 	struct strip_zone *zone;
zone              608 drivers/md/raid0.c 	zone = find_zone(mddev->private, &sector);
zone              611 drivers/md/raid0.c 		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
zone              614 drivers/md/raid0.c 		tmp_dev = map_sector(mddev, zone, sector, &sector);
zone              628 drivers/md/raid0.c 	bio->bi_iter.bi_sector = sector + zone->dev_start +
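
raid0's map_sector() above splits the array-relative sector into a chunk number and an offset within the chunk, then deals chunks round-robin across the zone's nb_dev disks; the driver has a shift-based fast path when the chunk size is a power of two. The arithmetic, reduced to standalone C for the power-of-two case:

        /* RAID0 chunk mapping in the spirit of map_sector() above. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long long sector = 1000;
                unsigned long long chunk_sects = 128;   /* 64 KiB chunks */
                unsigned int nb_dev = 3;

                unsigned long long sect_in_chunk = sector & (chunk_sects - 1);
                unsigned long long chunk = sector / chunk_sects;

                unsigned int dev = (unsigned int)(chunk % nb_dev); /* which disk */
                unsigned long long dev_sector =
                        (chunk / nb_dev) * chunk_sects + sect_in_chunk;

                /* 1000 -> chunk 7, offset 104 -> disk 1, sector 360 */
                printf("sector %llu -> disk %u, sector %llu\n",
                       sector, dev, dev_sector);
                return 0;
        }
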
zone              170 drivers/memstick/core/ms_block.c 	int zone = msb_get_zone_from_pba(pba);
zone              184 drivers/memstick/core/ms_block.c 	msb->free_block_count[zone]--;
zone              190 drivers/memstick/core/ms_block.c 	int zone = msb_get_zone_from_pba(pba);
zone              203 drivers/memstick/core/ms_block.c 	msb->free_block_count[zone]++;
zone             1074 drivers/memstick/core/ms_block.c static u16 msb_get_free_block(struct msb_data *msb, int zone)
zone             1077 drivers/memstick/core/ms_block.c 	int pba = zone * MS_BLOCKS_IN_ZONE;
zone             1082 drivers/memstick/core/ms_block.c 	if (!msb->free_block_count[zone]) {
zone             1083 drivers/memstick/core/ms_block.c 		pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
zone             1088 drivers/memstick/core/ms_block.c 	pos %= msb->free_block_count[zone];
zone             1091 drivers/memstick/core/ms_block.c 		msb->free_block_count[zone], pos);
zone             1101 drivers/memstick/core/ms_block.c 	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
zone              192 drivers/mtd/sm_ftl.c static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
zone              195 drivers/mtd/sm_ftl.c 	WARN_ON(zone < 0 || zone >= ftl->zone_count);
zone              202 drivers/mtd/sm_ftl.c 	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
zone              207 drivers/mtd/sm_ftl.c 			    int *zone, int *block, int *boffset)
zone              212 drivers/mtd/sm_ftl.c 	*zone = offset >= ftl->zone_count ? -1 : offset;
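
sm_mkoffset()/sm_break_offset() above are a compose/decompose pair: the linear MTD offset is (zone * SM_MAX_ZONE_SIZE + block) * block_size + boffset, and breaking an offset apart is successive division and remainder (do_div() in the driver). A standalone round-trip with example geometry; note the driver's decompose path divides by the logical max_lba rather than the physical zone size:

        /* Compose/decompose of sm_ftl offsets above, with example geometry. */
        #include <assert.h>
        #include <stdio.h>

        #define MAX_ZONE_SIZE 1024      /* physical blocks per zone */

        int main(void)
        {
                long long block_size = 16 * 512;        /* assumed 16-sector blocks */
                int zone = 2, block = 100, boffset = 512;

                long long off = ((long long)zone * MAX_ZONE_SIZE + block)
                                * block_size + boffset;

                /* invert it division by division, as sm_break_offset() does */
                int b_off = (int)(off % block_size);
                off /= block_size;
                int blk = (int)(off % MAX_ZONE_SIZE);
                off /= MAX_ZONE_SIZE;
                int zn = (int)off;

                assert(zn == zone && blk == block && b_off == boffset);
                printf("roundtrip ok: zone %d block %d boffset %d\n",
                       zn, blk, b_off);
                return 0;
        }
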
zone              239 drivers/mtd/sm_ftl.c 			  int zone, int block, int boffset,
zone              269 drivers/mtd/sm_ftl.c 		if (zone == 0 && block == ftl->cis_block && boffset ==
zone              280 drivers/mtd/sm_ftl.c 	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
zone              285 drivers/mtd/sm_ftl.c 			block, zone, ret);
zone              303 drivers/mtd/sm_ftl.c 			" as bad" , block, zone);
zone              312 drivers/mtd/sm_ftl.c 			block, zone);
zone              321 drivers/mtd/sm_ftl.c 			   int zone, int block, int boffset,
zone              330 drivers/mtd/sm_ftl.c 	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
zone              345 drivers/mtd/sm_ftl.c 	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
zone              351 drivers/mtd/sm_ftl.c 			block, zone, ret);
zone              368 drivers/mtd/sm_ftl.c 			  int zone, int block, int lba,
zone              391 drivers/mtd/sm_ftl.c 				boffset / SM_SECTOR_SIZE, lba, zone);
zone              405 drivers/mtd/sm_ftl.c 		if (!sm_write_sector(ftl, zone, block, boffset,
zone              417 drivers/mtd/sm_ftl.c 			if (sm_erase_block(ftl, zone, block, 0))
zone              423 drivers/mtd/sm_ftl.c 			sm_mark_block_bad(ftl, zone, block);
zone              432 drivers/mtd/sm_ftl.c static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
zone              446 drivers/mtd/sm_ftl.c 	sm_printk("marking block %d of zone %d as bad", block, zone);
zone              452 drivers/mtd/sm_ftl.c 		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
zone              462 drivers/mtd/sm_ftl.c 	struct ftl_zone *zone = &ftl->zones[zone_num];
zone              486 drivers/mtd/sm_ftl.c 		kfifo_in(&zone->free_sectors,
zone              496 drivers/mtd/sm_ftl.c static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
zone              512 drivers/mtd/sm_ftl.c 		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
zone              527 drivers/mtd/sm_ftl.c 		sm_erase_block(ftl, zone, block, 1);
zone              746 drivers/mtd/sm_ftl.c 	struct ftl_zone *zone = &ftl->zones[zone_num];
zone              756 drivers/mtd/sm_ftl.c 	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);
zone              758 drivers/mtd/sm_ftl.c 	if (!zone->lba_to_phys_table)
zone              760 drivers/mtd/sm_ftl.c 	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
zone              764 drivers/mtd/sm_ftl.c 	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
zone              765 drivers/mtd/sm_ftl.c 		kfree(zone->lba_to_phys_table);
zone              778 drivers/mtd/sm_ftl.c 			kfifo_free(&zone->free_sectors);
zone              779 drivers/mtd/sm_ftl.c 			kfree(zone->lba_to_phys_table);
zone              786 drivers/mtd/sm_ftl.c 			kfifo_in(&zone->free_sectors,
zone              814 drivers/mtd/sm_ftl.c 		if (zone->lba_to_phys_table[lba] < 0) {
zone              816 drivers/mtd/sm_ftl.c 			zone->lba_to_phys_table[lba] = block;
zone              822 drivers/mtd/sm_ftl.c 			lba, zone->lba_to_phys_table[lba], block, zone_num);
zone              830 drivers/mtd/sm_ftl.c 					zone->lba_to_phys_table[lba])) {
zone              831 drivers/mtd/sm_ftl.c 			zone->lba_to_phys_table[lba] = block;
zone              844 drivers/mtd/sm_ftl.c 	zone->initialized = 1;
zone              848 drivers/mtd/sm_ftl.c 	if (!kfifo_len(&zone->free_sectors)) {
zone              855 drivers/mtd/sm_ftl.c 	i %= (kfifo_len(&zone->free_sectors) / 2);
zone              858 drivers/mtd/sm_ftl.c 		len = kfifo_out(&zone->free_sectors,
zone              861 drivers/mtd/sm_ftl.c 		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
zone              869 drivers/mtd/sm_ftl.c 	struct ftl_zone *zone;
zone              873 drivers/mtd/sm_ftl.c 	zone = &ftl->zones[zone_num];
zone              875 drivers/mtd/sm_ftl.c 	if (!zone->initialized) {
zone              881 drivers/mtd/sm_ftl.c 	return zone;
zone              919 drivers/mtd/sm_ftl.c 	struct ftl_zone *zone;
zone              933 drivers/mtd/sm_ftl.c 	zone = &ftl->zones[zone_num];
zone              934 drivers/mtd/sm_ftl.c 	block_num = zone->lba_to_phys_table[ftl->cache_block];
zone              956 drivers/mtd/sm_ftl.c 	if (kfifo_out(&zone->free_sectors,
zone              968 drivers/mtd/sm_ftl.c 	zone->lba_to_phys_table[ftl->cache_block] = write_sector;
zone             1003 drivers/mtd/sm_ftl.c 	struct ftl_zone *zone;
zone             1011 drivers/mtd/sm_ftl.c 	zone = sm_get_zone(ftl, zone_num);
zone             1012 drivers/mtd/sm_ftl.c 	if (IS_ERR(zone)) {
zone             1013 drivers/mtd/sm_ftl.c 		error = PTR_ERR(zone);
zone             1025 drivers/mtd/sm_ftl.c 	block = zone->lba_to_phys_table[block];
zone             1049 drivers/mtd/sm_ftl.c 	struct ftl_zone *zone;
zone             1059 drivers/mtd/sm_ftl.c 	zone = sm_get_zone(ftl, zone_num);
zone             1060 drivers/mtd/sm_ftl.c 	if (IS_ERR(zone)) {
zone             1061 drivers/mtd/sm_ftl.c 		error = PTR_ERR(zone);
zone             6939 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h 	__le16	zone;
zone              250 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);
zone              252 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (NULL == zone)
zone              255 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone->flags = flags;
zone              256 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone->bitmap = bitmap;
zone              257 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
zone              258 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone->priority = priority;
zone              259 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone->offset = offset;
zone              263 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone->uid = zone_alloc->last_uid++;
zone              264 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone->allocator = zone_alloc;
zone              274 drivers/net/ethernet/mellanox/mlx4/alloc.c 		list_add_tail(&zone->prio_list, &it->prio_list);
zone              275 drivers/net/ethernet/mellanox/mlx4/alloc.c 	list_add_tail(&zone->list, &it->list);
zone              279 drivers/net/ethernet/mellanox/mlx4/alloc.c 	*puid = zone->uid;
zone              321 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone, *tmp;
zone              325 drivers/net/ethernet/mellanox/mlx4/alloc.c 	list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
zone              326 drivers/net/ethernet/mellanox/mlx4/alloc.c 		list_del(&zone->list);
zone              327 drivers/net/ethernet/mellanox/mlx4/alloc.c 		list_del(&zone->prio_list);
zone              328 drivers/net/ethernet/mellanox/mlx4/alloc.c 		kfree(zone);
zone              336 drivers/net/ethernet/mellanox/mlx4/alloc.c static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
zone              341 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_allocator *zone_alloc = zone->allocator;
zone              344 drivers/net/ethernet/mellanox/mlx4/alloc.c 	res = mlx4_bitmap_alloc_range(zone->bitmap, count,
zone              348 drivers/net/ethernet/mellanox/mlx4/alloc.c 		res += zone->offset;
zone              349 drivers/net/ethernet/mellanox/mlx4/alloc.c 		uid = zone->uid;
zone              354 drivers/net/ethernet/mellanox/mlx4/alloc.c 		if (unlikely(curr_node->priority == zone->priority))
zone              358 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
zone              372 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
zone              376 drivers/net/ethernet/mellanox/mlx4/alloc.c 			if (unlikely(it == zone))
zone              392 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
zone              418 drivers/net/ethernet/mellanox/mlx4/alloc.c static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
zone              421 drivers/net/ethernet/mellanox/mlx4/alloc.c 	mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
zone              428 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone;
zone              430 drivers/net/ethernet/mellanox/mlx4/alloc.c 	list_for_each_entry(zone, &zones->entries, list) {
zone              431 drivers/net/ethernet/mellanox/mlx4/alloc.c 		if (zone->uid == uid)
zone              432 drivers/net/ethernet/mellanox/mlx4/alloc.c 			return zone;
zone              440 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone;
zone              445 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone = __mlx4_find_zone_by_uid(zones, uid);
zone              447 drivers/net/ethernet/mellanox/mlx4/alloc.c 	bitmap = zone == NULL ? NULL : zone->bitmap;
zone              456 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone;
zone              461 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone = __mlx4_find_zone_by_uid(zones, uid);
zone              463 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (NULL == zone) {
zone              468 drivers/net/ethernet/mellanox/mlx4/alloc.c 	__mlx4_zone_remove_one_entry(zone);
zone              472 drivers/net/ethernet/mellanox/mlx4/alloc.c 	kfree(zone);
zone              481 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone, *zone_candidate = NULL;
zone              490 drivers/net/ethernet/mellanox/mlx4/alloc.c 	list_for_each_entry(zone, &zones->entries, list) {
zone              491 drivers/net/ethernet/mellanox/mlx4/alloc.c 		if (obj >= zone->offset) {
zone              492 drivers/net/ethernet/mellanox/mlx4/alloc.c 			u32 mobj = (obj - zone->offset) & zones->mask;
zone              494 drivers/net/ethernet/mellanox/mlx4/alloc.c 			if (mobj < zone->bitmap->max) {
zone              495 drivers/net/ethernet/mellanox/mlx4/alloc.c 				u32 curr_dist = zone->bitmap->effective_len;
zone              499 drivers/net/ethernet/mellanox/mlx4/alloc.c 					zone_candidate = zone;
zone              511 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone;
zone              516 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone = __mlx4_find_zone_by_uid(zones, uid);
zone              518 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (NULL == zone)
zone              521 drivers/net/ethernet/mellanox/mlx4/alloc.c 	res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);
zone              531 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone;
zone              536 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone = __mlx4_find_zone_by_uid(zones, uid);
zone              538 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (NULL == zone) {
zone              543 drivers/net/ethernet/mellanox/mlx4/alloc.c 	__mlx4_free_from_zone(zone, obj, count);
zone              553 drivers/net/ethernet/mellanox/mlx4/alloc.c 	struct mlx4_zone_entry *zone;
zone              561 drivers/net/ethernet/mellanox/mlx4/alloc.c 	zone = __mlx4_find_zone_by_uid_unique(zones, obj);
zone              563 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (NULL == zone) {
zone              568 drivers/net/ethernet/mellanox/mlx4/alloc.c 	__mlx4_free_from_zone(zone, obj, count);
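
The mlx4 allocator hits above show zones kept on a list, located by uid under the allocator lock, and a global object number translated back into a zone-local bitmap index with obj - zone->offset before the range is freed. A compilable userspace sketch of that lookup-and-translate step; the list and locking are simplified away and the names are illustrative, not the mlx4 API:

#include <stddef.h>
#include <stdio.h>

struct zone_entry {
	unsigned int uid;
	unsigned int offset;	/* first global object number owned by this zone */
	unsigned int max;	/* number of objects in the zone */
};

static struct zone_entry zones[] = {
	{ .uid = 1, .offset = 0,  .max = 64 },
	{ .uid = 2, .offset = 64, .max = 128 },
};

static struct zone_entry *find_zone_by_uid(unsigned int uid)
{
	for (size_t i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
		if (zones[i].uid == uid)
			return &zones[i];
	return NULL;		/* mlx4 returns NULL and the caller warns */
}

static void free_from_zone(struct zone_entry *zone, unsigned int obj)
{
	/* Translate the global number into a zone-local bitmap index. */
	unsigned int local = obj - zone->offset;

	printf("uid %u: clear bitmap bit %u\n", zone->uid, local);
}

int main(void)
{
	struct zone_entry *zone = find_zone_by_uid(2);

	if (zone)
		free_from_zone(zone, 100);	/* global obj 100 -> local 36 */
	return 0;
}
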
zone              708 drivers/net/fjes/fjes_hw.c 		(hw->ep_shm_info[hw->my_epid].zone ==
zone              712 drivers/net/fjes/fjes_hw.c 		return (hw->ep_shm_info[epid].zone ==
zone              713 drivers/net/fjes/fjes_hw.c 				hw->ep_shm_info[hw->my_epid].zone);
zone              962 drivers/net/fjes/fjes_hw.c 	struct my_s {u8 es_status; u8 zone; } *info;
zone             1001 drivers/net/fjes/fjes_hw.c 				hw->ep_shm_info[epidx].zone =
zone             1002 drivers/net/fjes/fjes_hw.c 					info[epidx].zone;
zone             1010 drivers/net/fjes/fjes_hw.c 				if ((info[epidx].zone !=
zone             1014 drivers/net/fjes/fjes_hw.c 				    (info[epidx].zone ==
zone             1015 drivers/net/fjes/fjes_hw.c 					info[hw->my_epid].zone))
zone             1023 drivers/net/fjes/fjes_hw.c 				if ((info[epidx].zone ==
zone             1027 drivers/net/fjes/fjes_hw.c 				    (info[epidx].zone !=
zone             1028 drivers/net/fjes/fjes_hw.c 					info[hw->my_epid].zone)) {
zone             1037 drivers/net/fjes/fjes_hw.c 				if ((info[epidx].zone ==
zone             1041 drivers/net/fjes/fjes_hw.c 				    (info[epidx].zone !=
zone             1042 drivers/net/fjes/fjes_hw.c 					info[hw->my_epid].zone))
zone             1049 drivers/net/fjes/fjes_hw.c 			hw->ep_shm_info[epidx].zone = info[epidx].zone;
zone              144 drivers/net/fjes/fjes_hw.h 			u8 zone;
zone              260 drivers/net/fjes/fjes_hw.h 	u8 zone;
zone              370 drivers/net/fjes/fjes_main.c 			hw->ep_shm_info[epidx].zone =
zone              371 drivers/net/fjes/fjes_main.c 			    hw->hw_info.res_buf->info.info[epidx].zone;
zone               56 drivers/net/fjes/fjes_trace.h 		__dynamic_array(u8, zone, hw->max_epid)
zone               65 drivers/net/fjes/fjes_trace.h 			*((u8 *)__get_dynamic_array(zone) + x) =
zone               66 drivers/net/fjes/fjes_trace.h 					res_buf->info.info[x].zone;
zone               73 drivers/net/fjes/fjes_trace.h 		  __print_array(__get_dynamic_array(zone),
zone               74 drivers/net/fjes/fjes_trace.h 				__get_dynamic_array_len(zone) / sizeof(u8),
zone               54 drivers/net/wireless/rsi/rsi_91x_main.c void rsi_dbg(u32 zone, const char *fmt, ...)
zone               64 drivers/net/wireless/rsi/rsi_91x_main.c 	if (zone & rsi_zone_enabled)
zone               62 drivers/net/wireless/rsi/rsi_main.h extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
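
rsi_dbg() above gates its output on a bitmask: each message carries a zone bit and is emitted only if that bit is also set in rsi_zone_enabled. A self-contained sketch of the same gating with varargs; the zone names here are made up for illustration:

#include <stdarg.h>
#include <stdio.h>

#define ZONE_INFO	(1u << 0)
#define ZONE_ERROR	(1u << 1)
#define ZONE_DATA	(1u << 2)

static unsigned int zone_enabled = ZONE_INFO | ZONE_ERROR;

static void dbg(unsigned int zone, const char *fmt, ...)
{
	va_list args;

	if (!(zone & zone_enabled))	/* same test as rsi_dbg() */
		return;
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

int main(void)
{
	dbg(ZONE_ERROR, "printed: error in %s\n", "init");
	dbg(ZONE_DATA, "suppressed: data path\n");
	return 0;
}
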
zone              103 drivers/platform/chrome/wilco_ec/telemetry.c 	u8 zone;
zone              239 drivers/platform/x86/alienware-wmi.c static int parse_rgb(const char *buf, struct platform_zone *zone)
zone              259 drivers/platform/x86/alienware-wmi.c 	zone->colors = repackager.cp;
zone              265 drivers/platform/x86/alienware-wmi.c 	u8 zone;
zone              267 drivers/platform/x86/alienware-wmi.c 	for (zone = 0; zone < quirks->num_zones; zone++) {
zone              268 drivers/platform/x86/alienware-wmi.c 		if ((struct device_attribute *)zone_data[zone].attr == attr) {
zone              270 drivers/platform/x86/alienware-wmi.c 				 zone_data[zone].location);
zone              271 drivers/platform/x86/alienware-wmi.c 			return &zone_data[zone];
zone              280 drivers/platform/x86/alienware-wmi.c static int alienware_update_led(struct platform_zone *zone)
zone              289 drivers/platform/x86/alienware-wmi.c 		wmax_basic_args.led_mask = 1 << zone->location;
zone              290 drivers/platform/x86/alienware-wmi.c 		wmax_basic_args.colors = zone->colors;
zone              298 drivers/platform/x86/alienware-wmi.c 		legacy_args.colors = zone->colors;
zone              307 drivers/platform/x86/alienware-wmi.c 		method_id = zone->location + 1;
zone              431 drivers/platform/x86/alienware-wmi.c 	u8 zone;
zone              468 drivers/platform/x86/alienware-wmi.c 	for (zone = 0; zone < quirks->num_zones; zone++) {
zone              469 drivers/platform/x86/alienware-wmi.c 		sprintf(buffer, "zone%02hhX", zone);
zone              473 drivers/platform/x86/alienware-wmi.c 		sysfs_attr_init(&zone_dev_attrs[zone].attr);
zone              474 drivers/platform/x86/alienware-wmi.c 		zone_dev_attrs[zone].attr.name = name;
zone              475 drivers/platform/x86/alienware-wmi.c 		zone_dev_attrs[zone].attr.mode = 0644;
zone              476 drivers/platform/x86/alienware-wmi.c 		zone_dev_attrs[zone].show = zone_show;
zone              477 drivers/platform/x86/alienware-wmi.c 		zone_dev_attrs[zone].store = zone_set;
zone              478 drivers/platform/x86/alienware-wmi.c 		zone_data[zone].location = zone;
zone              479 drivers/platform/x86/alienware-wmi.c 		zone_attrs[zone] = &zone_dev_attrs[zone].attr;
zone              480 drivers/platform/x86/alienware-wmi.c 		zone_data[zone].attr = &zone_dev_attrs[zone];
zone              492 drivers/platform/x86/alienware-wmi.c 	u8 zone;
zone              497 drivers/platform/x86/alienware-wmi.c 		for (zone = 0; zone < quirks->num_zones; zone++)
zone              498 drivers/platform/x86/alienware-wmi.c 			kfree(zone_dev_attrs[zone].attr.name);
zone               31 drivers/scsi/sd_zbc.c 				struct blk_zone *zone)
zone               35 drivers/scsi/sd_zbc.c 	memset(zone, 0, sizeof(struct blk_zone));
zone               37 drivers/scsi/sd_zbc.c 	zone->type = buf[0] & 0x0f;
zone               38 drivers/scsi/sd_zbc.c 	zone->cond = (buf[1] >> 4) & 0xf;
zone               40 drivers/scsi/sd_zbc.c 		zone->reset = 1;
zone               42 drivers/scsi/sd_zbc.c 		zone->non_seq = 1;
zone               44 drivers/scsi/sd_zbc.c 	zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
zone               45 drivers/scsi/sd_zbc.c 	zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
zone               46 drivers/scsi/sd_zbc.c 	zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
zone               47 drivers/scsi/sd_zbc.c 	if (zone->type != ZBC_ZONE_TYPE_CONV &&
zone               48 drivers/scsi/sd_zbc.c 	    zone->cond == ZBC_ZONE_COND_FULL)
zone               49 drivers/scsi/sd_zbc.c 		zone->wp = zone->start + zone->len;
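
The sd_zbc.c hits above decode one 64-byte REPORT ZONES descriptor: zone type from the low nibble of byte 0, condition from the high nibble of byte 1, and big-endian 64-bit length/start/write-pointer fields at offsets 8, 16 and 24, each converted from logical blocks to 512-byte sectors. A userspace sketch of the byte-level decoding; the logical-to-sector scaling is reduced to a shift and the struct is illustrative:

#include <stdint.h>
#include <stdio.h>

struct zone_info {
	uint8_t  type, cond;
	uint64_t len, start, wp;	/* in 512-byte sectors */
};

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static void parse_zone(const uint8_t buf[64], struct zone_info *z,
		       unsigned int lb_shift)	/* log2(lb size / 512) */
{
	z->type  = buf[0] & 0x0f;
	z->cond  = (buf[1] >> 4) & 0xf;
	z->len   = get_be64(&buf[8])  << lb_shift;
	z->start = get_be64(&buf[16]) << lb_shift;
	z->wp    = get_be64(&buf[24]) << lb_shift;
}

int main(void)
{
	uint8_t buf[64] = {0};
	struct zone_info z;

	buf[0] = 0x02;			/* sequential-write-required */
	buf[1] = 0x10;			/* empty */
	buf[15] = 0x80;			/* len = 0x80 logical blocks */
	parse_zone(buf, &z, 3);		/* 4096-byte logical blocks */
	printf("type %u cond %u len %llu sectors\n", z.type, z.cond,
	       (unsigned long long)z.len);
	return 0;
}
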
zone              540 drivers/staging/rts5208/rtsx_chip.h 	struct zone_entry *zone;
zone              800 drivers/staging/rts5208/xd.c 	xd_card->zone = vmalloc(size);
zone              801 drivers/staging/rts5208/xd.c 	if (!xd_card->zone)
zone              805 drivers/staging/rts5208/xd.c 		xd_card->zone[i].build_flag = 0;
zone              806 drivers/staging/rts5208/xd.c 		xd_card->zone[i].l2p_table = NULL;
zone              807 drivers/staging/rts5208/xd.c 		xd_card->zone[i].free_table = NULL;
zone              808 drivers/staging/rts5208/xd.c 		xd_card->zone[i].get_index = 0;
zone              809 drivers/staging/rts5208/xd.c 		xd_card->zone[i].set_index = 0;
zone              810 drivers/staging/rts5208/xd.c 		xd_card->zone[i].unused_blk_cnt = 0;
zone              816 drivers/staging/rts5208/xd.c static inline void free_zone(struct zone_entry *zone)
zone              818 drivers/staging/rts5208/xd.c 	if (!zone)
zone              821 drivers/staging/rts5208/xd.c 	zone->build_flag = 0;
zone              822 drivers/staging/rts5208/xd.c 	zone->set_index = 0;
zone              823 drivers/staging/rts5208/xd.c 	zone->get_index = 0;
zone              824 drivers/staging/rts5208/xd.c 	zone->unused_blk_cnt = 0;
zone              825 drivers/staging/rts5208/xd.c 	vfree(zone->l2p_table);
zone              826 drivers/staging/rts5208/xd.c 	zone->l2p_table = NULL;
zone              827 drivers/staging/rts5208/xd.c 	vfree(zone->free_table);
zone              828 drivers/staging/rts5208/xd.c 	zone->free_table = NULL;
zone              834 drivers/staging/rts5208/xd.c 	struct zone_entry *zone;
zone              843 drivers/staging/rts5208/xd.c 	zone = &xd_card->zone[zone_no];
zone              845 drivers/staging/rts5208/xd.c 	if (!zone->free_table) {
zone              850 drivers/staging/rts5208/xd.c 	if ((zone->set_index >= XD_FREE_TABLE_CNT) ||
zone              851 drivers/staging/rts5208/xd.c 	    (zone->set_index < 0)) {
zone              852 drivers/staging/rts5208/xd.c 		free_zone(zone);
zone              858 drivers/staging/rts5208/xd.c 		zone->set_index);
zone              860 drivers/staging/rts5208/xd.c 	zone->free_table[zone->set_index++] = (u16)(phy_blk & 0x3ff);
zone              861 drivers/staging/rts5208/xd.c 	if (zone->set_index >= XD_FREE_TABLE_CNT)
zone              862 drivers/staging/rts5208/xd.c 		zone->set_index = 0;
zone              863 drivers/staging/rts5208/xd.c 	zone->unused_blk_cnt++;
zone              869 drivers/staging/rts5208/xd.c 	struct zone_entry *zone;
zone              877 drivers/staging/rts5208/xd.c 	zone = &xd_card->zone[zone_no];
zone              879 drivers/staging/rts5208/xd.c 	if ((zone->unused_blk_cnt == 0) ||
zone              880 drivers/staging/rts5208/xd.c 	    (zone->set_index == zone->get_index)) {
zone              881 drivers/staging/rts5208/xd.c 		free_zone(zone);
zone              885 drivers/staging/rts5208/xd.c 	if ((zone->get_index >= XD_FREE_TABLE_CNT) || (zone->get_index < 0)) {
zone              886 drivers/staging/rts5208/xd.c 		free_zone(zone);
zone              892 drivers/staging/rts5208/xd.c 		zone->get_index);
zone              894 drivers/staging/rts5208/xd.c 	phy_blk = zone->free_table[zone->get_index];
zone              895 drivers/staging/rts5208/xd.c 	zone->free_table[zone->get_index++] = 0xFFFF;
zone              896 drivers/staging/rts5208/xd.c 	if (zone->get_index >= XD_FREE_TABLE_CNT)
zone              897 drivers/staging/rts5208/xd.c 		zone->get_index = 0;
zone              898 drivers/staging/rts5208/xd.c 	zone->unused_blk_cnt--;
zone              908 drivers/staging/rts5208/xd.c 	struct zone_entry *zone;
zone              910 drivers/staging/rts5208/xd.c 	zone = &xd_card->zone[zone_no];
zone              911 drivers/staging/rts5208/xd.c 	zone->l2p_table[log_off] = phy_off;
zone              917 drivers/staging/rts5208/xd.c 	struct zone_entry *zone;
zone              920 drivers/staging/rts5208/xd.c 	zone = &xd_card->zone[zone_no];
zone              921 drivers/staging/rts5208/xd.c 	if (zone->l2p_table[log_off] == 0xFFFF) {
zone              934 drivers/staging/rts5208/xd.c 		if (zone->unused_blk_cnt <= 0) {
zone              939 drivers/staging/rts5208/xd.c 		for (i = 0; i < zone->unused_blk_cnt; i++) {
zone              951 drivers/staging/rts5208/xd.c 		if (i >= zone->unused_blk_cnt) {
zone              960 drivers/staging/rts5208/xd.c 	return (u32)zone->l2p_table[log_off] + ((u32)(zone_no) << 10);
zone             1296 drivers/staging/rts5208/xd.c 	struct zone_entry *zone;
zone             1305 drivers/staging/rts5208/xd.c 	if (!xd_card->zone) {
zone             1311 drivers/staging/rts5208/xd.c 	if (xd_card->zone[zone_no].build_flag) {
zone             1317 drivers/staging/rts5208/xd.c 	zone = &xd_card->zone[zone_no];
zone             1319 drivers/staging/rts5208/xd.c 	if (!zone->l2p_table) {
zone             1320 drivers/staging/rts5208/xd.c 		zone->l2p_table = vmalloc(2000);
zone             1321 drivers/staging/rts5208/xd.c 		if (!zone->l2p_table)
zone             1324 drivers/staging/rts5208/xd.c 	memset((u8 *)(zone->l2p_table), 0xff, 2000);
zone             1326 drivers/staging/rts5208/xd.c 	if (!zone->free_table) {
zone             1327 drivers/staging/rts5208/xd.c 		zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
zone             1328 drivers/staging/rts5208/xd.c 		if (!zone->free_table)
zone             1331 drivers/staging/rts5208/xd.c 	memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
zone             1354 drivers/staging/rts5208/xd.c 	zone->set_index = 0;
zone             1355 drivers/staging/rts5208/xd.c 	zone->get_index = 0;
zone             1356 drivers/staging/rts5208/xd.c 	zone->unused_blk_cnt = 0;
zone             1390 drivers/staging/rts5208/xd.c 		if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
zone             1391 drivers/staging/rts5208/xd.c 			zone->l2p_table[cur_fst_page_logoff] = (u16)(i & 0x3FF);
zone             1395 drivers/staging/rts5208/xd.c 		phy_block = zone->l2p_table[cur_fst_page_logoff] +
zone             1419 drivers/staging/rts5208/xd.c 				zone->l2p_table[cur_fst_page_logoff] =
zone             1429 drivers/staging/rts5208/xd.c 				zone->l2p_table[cur_fst_page_logoff] =
zone             1454 drivers/staging/rts5208/xd.c 		if (zone->l2p_table[start] == 0xFFFF)
zone             1461 drivers/staging/rts5208/xd.c 		zone->unused_blk_cnt);
zone             1463 drivers/staging/rts5208/xd.c 	if ((zone->unused_blk_cnt - i) < 1)
zone             1466 drivers/staging/rts5208/xd.c 	zone->build_flag = 1;
zone             1471 drivers/staging/rts5208/xd.c 	vfree(zone->l2p_table);
zone             1472 drivers/staging/rts5208/xd.c 	zone->l2p_table = NULL;
zone             1473 drivers/staging/rts5208/xd.c 	vfree(zone->free_table);
zone             1474 drivers/staging/rts5208/xd.c 	zone->free_table = NULL;
zone             1867 drivers/staging/rts5208/xd.c 	if (xd_card->zone[zone_no].build_flag == 0) {
zone             2005 drivers/staging/rts5208/xd.c 		if (xd_card->zone[zone_no].build_flag == 0) {
zone             2078 drivers/staging/rts5208/xd.c 	if (xd_card->zone) {
zone             2080 drivers/staging/rts5208/xd.c 			vfree(xd_card->zone[i].l2p_table);
zone             2081 drivers/staging/rts5208/xd.c 			xd_card->zone[i].l2p_table = NULL;
zone             2082 drivers/staging/rts5208/xd.c 			vfree(xd_card->zone[i].free_table);
zone             2083 drivers/staging/rts5208/xd.c 			xd_card->zone[i].free_table = NULL;
zone             2085 drivers/staging/rts5208/xd.c 		vfree(xd_card->zone);
zone             2086 drivers/staging/rts5208/xd.c 		xd_card->zone = NULL;
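
The xd.c hits show a fixed-size free-block table used as a ring: set_index appends a freed physical block and wraps at XD_FREE_TABLE_CNT, get_index pops one and writes 0xFFFF into the vacated slot, and unused_blk_cnt tracks occupancy. A minimal sketch of that ring discipline with the bounds checking and zone bookkeeping trimmed:

#include <stdint.h>
#include <stdio.h>

#define FREE_TABLE_CNT 16

static uint16_t free_table[FREE_TABLE_CNT];
static int set_index, get_index, unused_blk_cnt;

static void set_unused_block(uint16_t phy_blk)
{
	free_table[set_index++] = phy_blk & 0x3ff;	/* 10-bit in-zone block */
	if (set_index >= FREE_TABLE_CNT)
		set_index = 0;				/* wrap, as in xd.c */
	unused_blk_cnt++;
}

static uint16_t get_unused_block(void)
{
	uint16_t phy_blk;

	if (!unused_blk_cnt || set_index == get_index)
		return 0xFFFF;				/* empty: caller rebuilds */
	phy_blk = free_table[get_index];
	free_table[get_index++] = 0xFFFF;		/* mark slot consumed */
	if (get_index >= FREE_TABLE_CNT)
		get_index = 0;
	unused_blk_cnt--;
	return phy_blk;
}

int main(void)
{
	set_unused_block(5);
	printf("got block %u\n", (unsigned)get_unused_block());
	return 0;
}
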
zone              256 drivers/staging/uwb/drp-ie.c void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
zone              264 drivers/staging/uwb/drp-ie.c 			set_bit(zone * UWB_NUM_ZONES + mas, bm->bm);
zone              289 drivers/staging/uwb/drp-ie.c 	u8 zone;
zone              298 drivers/staging/uwb/drp-ie.c 		for (zone = 0; zone < UWB_NUM_ZONES; zone++)   {
zone              299 drivers/staging/uwb/drp-ie.c 			zone_mask = 1 << zone;
zone              301 drivers/staging/uwb/drp-ie.c 				uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
zone               51 drivers/thermal/da9062-thermal.c 	struct thermal_zone_device *zone;
zone               96 drivers/thermal/da9062-thermal.c 		thermal_zone_device_update(thermal->zone,
zone               99 drivers/thermal/da9062-thermal.c 		delay = msecs_to_jiffies(thermal->zone->passive_delay);
zone              107 drivers/thermal/da9062-thermal.c 	thermal_zone_device_update(thermal->zone,
zone              242 drivers/thermal/da9062-thermal.c 	thermal->zone = thermal_zone_device_register(thermal->config->name,
zone              246 drivers/thermal/da9062-thermal.c 	if (IS_ERR(thermal->zone)) {
zone              248 drivers/thermal/da9062-thermal.c 		ret = PTR_ERR(thermal->zone);
zone              254 drivers/thermal/da9062-thermal.c 		thermal->zone->passive_delay);
zone              277 drivers/thermal/da9062-thermal.c 	thermal_zone_device_unregister(thermal->zone);
zone              288 drivers/thermal/da9062-thermal.c 	thermal_zone_device_unregister(thermal->zone);
zone               13 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
zone               16 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 	struct int34x_thermal_zone *d = zone->devdata;
zone               21 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 		return d->override_ops->get_temp(zone, temp);
zone               42 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
zone               45 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 	struct int34x_thermal_zone *d = zone->devdata;
zone               49 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 		return d->override_ops->get_trip_temp(zone, trip, temp);
zone               74 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
zone               78 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 	struct int34x_thermal_zone *d = zone->devdata;
zone               82 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 		return d->override_ops->get_trip_type(zone, trip, type);
zone              107 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
zone              110 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 	struct int34x_thermal_zone *d = zone->devdata;
zone              115 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 		return d->override_ops->set_trip_temp(zone, trip, temp);
zone              129 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
zone              132 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 	struct int34x_thermal_zone *d = zone->devdata;
zone              137 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 		return d->override_ops->get_trip_hyst(zone, trip, temp);
zone              250 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 	int34x_thermal_zone->zone = thermal_zone_device_register(
zone              257 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 	if (IS_ERR(int34x_thermal_zone->zone)) {
zone              258 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 		ret = PTR_ERR(int34x_thermal_zone->zone);
zone              276 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c 	thermal_zone_device_unregister(int34x_thermal_zone->zone);
zone               31 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h 	struct thermal_zone_device *zone;
zone               58 drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h 	thermal_zone_device_update(tzone->zone, event);
zone              259 drivers/thermal/intel/int340x_thermal/processor_thermal_device.c static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
zone              413 drivers/thermal/of-thermal.c thermal_zone_of_add_sensor(struct device_node *zone,
zone              420 drivers/thermal/of-thermal.c 	tzd = thermal_zone_get_zone_by_name(zone->name);
zone             1026 drivers/thermal/of-thermal.c 		struct thermal_zone_device *zone;
zone             1062 drivers/thermal/of-thermal.c 		zone = thermal_zone_device_register(child->name, tz->ntrips,
zone             1067 drivers/thermal/of-thermal.c 		if (IS_ERR(zone)) {
zone             1069 drivers/thermal/of-thermal.c 			       PTR_ERR(zone));
zone             1109 drivers/thermal/of-thermal.c 		struct thermal_zone_device *zone;
zone             1111 drivers/thermal/of-thermal.c 		zone = thermal_zone_get_zone_by_name(child->name);
zone             1112 drivers/thermal/of-thermal.c 		if (IS_ERR(zone))
zone             1115 drivers/thermal/of-thermal.c 		thermal_zone_device_unregister(zone);
zone             1116 drivers/thermal/of-thermal.c 		kfree(zone->tzp);
zone             1117 drivers/thermal/of-thermal.c 		kfree(zone->ops);
zone             1118 drivers/thermal/of-thermal.c 		of_thermal_free_zone(zone->devdata);
zone               82 drivers/thermal/rcar_gen3_thermal.c 	struct thermal_zone_device *zone;
zone              252 drivers/thermal/rcar_gen3_thermal.c 			thermal_zone_device_update(priv->tscs[i]->zone,
zone              352 drivers/thermal/rcar_gen3_thermal.c 	struct thermal_zone_device *zone = data;
zone              354 drivers/thermal/rcar_gen3_thermal.c 	thermal_remove_hwmon_sysfs(zone);
zone              363 drivers/thermal/rcar_gen3_thermal.c 	struct thermal_zone_device *zone;
zone              432 drivers/thermal/rcar_gen3_thermal.c 		zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
zone              434 drivers/thermal/rcar_gen3_thermal.c 		if (IS_ERR(zone)) {
zone              436 drivers/thermal/rcar_gen3_thermal.c 			ret = PTR_ERR(zone);
zone              439 drivers/thermal/rcar_gen3_thermal.c 		tsc->zone = zone;
zone              441 drivers/thermal/rcar_gen3_thermal.c 		tsc->zone->tzp->no_hwmon = false;
zone              442 drivers/thermal/rcar_gen3_thermal.c 		ret = thermal_add_hwmon_sysfs(tsc->zone);
zone              446 drivers/thermal/rcar_gen3_thermal.c 		ret = devm_add_action_or_reset(dev, rcar_gen3_hwmon_action, zone);
zone              451 drivers/thermal/rcar_gen3_thermal.c 		ret = of_thermal_get_ntrips(tsc->zone);
zone               92 drivers/thermal/rcar_thermal.c 	struct thermal_zone_device *zone;
zone              105 drivers/thermal/rcar_thermal.c #define rcar_zone_to_priv(zone)		((zone)->devdata)
zone              297 drivers/thermal/rcar_thermal.c static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
zone              299 drivers/thermal/rcar_thermal.c 	struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
zone              304 drivers/thermal/rcar_thermal.c static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
zone              307 drivers/thermal/rcar_thermal.c 	struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
zone              323 drivers/thermal/rcar_thermal.c static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
zone              326 drivers/thermal/rcar_thermal.c 	struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
zone              342 drivers/thermal/rcar_thermal.c static int rcar_thermal_notify(struct thermal_zone_device *zone,
zone              345 drivers/thermal/rcar_thermal.c 	struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
zone              415 drivers/thermal/rcar_thermal.c 		thermal_zone_device_update(priv->zone,
zone              479 drivers/thermal/rcar_thermal.c 			thermal_remove_hwmon_sysfs(priv->zone);
zone              481 drivers/thermal/rcar_thermal.c 			thermal_zone_device_unregister(priv->zone);
zone              575 drivers/thermal/rcar_thermal.c 			priv->zone = devm_thermal_zone_of_sensor_register(
zone              579 drivers/thermal/rcar_thermal.c 			priv->zone = thermal_zone_device_register(
zone              584 drivers/thermal/rcar_thermal.c 		if (IS_ERR(priv->zone)) {
zone              586 drivers/thermal/rcar_thermal.c 			ret = PTR_ERR(priv->zone);
zone              587 drivers/thermal/rcar_thermal.c 			priv->zone = NULL;
zone              596 drivers/thermal/rcar_thermal.c 			priv->zone->tzp->no_hwmon = false;
zone              597 drivers/thermal/rcar_thermal.c 			ret = thermal_add_hwmon_sysfs(priv->zone);
zone              426 drivers/thermal/tegra/soctherm.c 	struct tegra_thermctl_zone *zone = data;
zone              429 drivers/thermal/tegra/soctherm.c 	val = readl(zone->reg);
zone              430 drivers/thermal/tegra/soctherm.c 	val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
zone              586 drivers/thermal/tegra/soctherm.c 	struct tegra_thermctl_zone *zone = data;
zone              587 drivers/thermal/tegra/soctherm.c 	struct thermal_zone_device *tz = zone->tz;
zone              588 drivers/thermal/tegra/soctherm.c 	struct tegra_soctherm *ts = zone->ts;
zone              589 drivers/thermal/tegra/soctherm.c 	const struct tegra_tsensor_group *sg = zone->sg;
zone              590 drivers/thermal/tegra/soctherm.c 	struct device *dev = zone->dev;
zone              638 drivers/thermal/tegra/soctherm.c 	struct tegra_thermctl_zone *zone = data;
zone              639 drivers/thermal/tegra/soctherm.c 	struct thermal_zone_device *tz = zone->tz;
zone              645 drivers/thermal/tegra/soctherm.c 	ret = tz->ops->get_trip_temp(zone->tz, trip, &trip_temp);
zone              692 drivers/thermal/tegra/soctherm.c 	struct tegra_thermctl_zone *zone = data;
zone              695 drivers/thermal/tegra/soctherm.c 	thermal_irq_disable(zone);
zone              697 drivers/thermal/tegra/soctherm.c 	r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
zone              699 drivers/thermal/tegra/soctherm.c 	writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
zone              701 drivers/thermal/tegra/soctherm.c 	lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
zone              702 drivers/thermal/tegra/soctherm.c 	hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
zone              703 drivers/thermal/tegra/soctherm.c 	dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);
zone              705 drivers/thermal/tegra/soctherm.c 	r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
zone              706 drivers/thermal/tegra/soctherm.c 	r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
zone              708 drivers/thermal/tegra/soctherm.c 	writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
zone              710 drivers/thermal/tegra/soctherm.c 	thermal_irq_enable(zone);
zone             2213 drivers/thermal/tegra/soctherm.c 		struct tegra_thermctl_zone *zone =
zone             2214 drivers/thermal/tegra/soctherm.c 			devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
zone             2215 drivers/thermal/tegra/soctherm.c 		if (!zone) {
zone             2220 drivers/thermal/tegra/soctherm.c 		zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
zone             2221 drivers/thermal/tegra/soctherm.c 		zone->dev = &pdev->dev;
zone             2222 drivers/thermal/tegra/soctherm.c 		zone->sg = soc->ttgs[i];
zone             2223 drivers/thermal/tegra/soctherm.c 		zone->ts = tegra;
zone             2226 drivers/thermal/tegra/soctherm.c 							 soc->ttgs[i]->id, zone,
zone             2235 drivers/thermal/tegra/soctherm.c 		zone->tz = z;
zone               35 drivers/thermal/tegra/tegra-bpmp-thermal.c 	struct tegra_bpmp_thermal_zone *zone = data;
zone               43 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.get_temp.zone = zone->idx;
zone               52 drivers/thermal/tegra/tegra-bpmp-thermal.c 	err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
zone               63 drivers/thermal/tegra/tegra-bpmp-thermal.c 	struct tegra_bpmp_thermal_zone *zone = data;
zone               69 drivers/thermal/tegra/tegra-bpmp-thermal.c 	req.set_trip.zone = zone->idx;
zone               79 drivers/thermal/tegra/tegra-bpmp-thermal.c 	return tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
zone               84 drivers/thermal/tegra/tegra-bpmp-thermal.c 	struct tegra_bpmp_thermal_zone *zone;
zone               86 drivers/thermal/tegra/tegra-bpmp-thermal.c 	zone = container_of(work, struct tegra_bpmp_thermal_zone,
zone               89 drivers/thermal/tegra/tegra-bpmp-thermal.c 	thermal_zone_device_update(zone->tzd, THERMAL_TRIP_VIOLATED);
zone              109 drivers/thermal/tegra/tegra-bpmp-thermal.c 		if (tegra->zones[i]->idx != req->host_trip_reached.zone)
zone              118 drivers/thermal/tegra/tegra-bpmp-thermal.c 		req->host_trip_reached.zone);
zone              182 drivers/thermal/tegra/tegra-bpmp-thermal.c 		struct tegra_bpmp_thermal_zone *zone;
zone              185 drivers/thermal/tegra/tegra-bpmp-thermal.c 		zone = devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
zone              186 drivers/thermal/tegra/tegra-bpmp-thermal.c 		if (!zone)
zone              189 drivers/thermal/tegra/tegra-bpmp-thermal.c 		zone->idx = i;
zone              190 drivers/thermal/tegra/tegra-bpmp-thermal.c 		zone->tegra = tegra;
zone              192 drivers/thermal/tegra/tegra-bpmp-thermal.c 		err = tegra_bpmp_thermal_get_temp(zone, &temp);
zone              194 drivers/thermal/tegra/tegra-bpmp-thermal.c 			devm_kfree(&pdev->dev, zone);
zone              199 drivers/thermal/tegra/tegra-bpmp-thermal.c 			&pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops);
zone              203 drivers/thermal/tegra/tegra-bpmp-thermal.c 			devm_kfree(&pdev->dev, zone);
zone              207 drivers/thermal/tegra/tegra-bpmp-thermal.c 		zone->tzd = tzd;
zone              208 drivers/thermal/tegra/tegra-bpmp-thermal.c 		INIT_WORK(&zone->tz_device_update_work,
zone              211 drivers/thermal/tegra/tegra-bpmp-thermal.c 		tegra->zones[tegra->num_zones++] = zone;
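
tegra-bpmp-thermal above embeds a work_struct in each zone and the worker recovers the owning zone with container_of() before calling thermal_zone_device_update(). A userspace illustration of container_of() recovering the enclosing struct from a pointer to one of its members; struct work here is a placeholder, not the kernel type:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct bpmp_zone {
	int idx;
	struct work update_work;	/* embedded, like tz_device_update_work */
};

static void worker(struct work *w)
{
	/* Recover the zone that owns this work item. */
	struct bpmp_zone *zone = container_of(w, struct bpmp_zone, update_work);

	printf("update zone %d\n", zone->idx);
}

int main(void)
{
	struct bpmp_zone z = { .idx = 3 };

	worker(&z.update_work);
	return 0;
}
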
zone              542 drivers/usb/storage/alauda.c 	unsigned int zone)
zone              544 drivers/usb/storage/alauda.c 	u16 *pba_to_lba = info->pba_to_lba[zone];
zone              549 drivers/usb/storage/alauda.c 			return (zone << info->zoneshift) + i;
zone              558 drivers/usb/storage/alauda.c static int alauda_read_map(struct us_data *us, unsigned int zone)
zone              566 drivers/usb/storage/alauda.c 	unsigned int zone_base_lba = zone * uzonesize;
zone              567 drivers/usb/storage/alauda.c 	unsigned int zone_base_pba = zone * zonesize;
zone              575 drivers/usb/storage/alauda.c 	usb_stor_dbg(us, "Mapping blocks for zone %d\n", zone);
zone              666 drivers/usb/storage/alauda.c 	MEDIA_INFO(us).lba_to_pba[zone] = lba_to_pba;
zone              667 drivers/usb/storage/alauda.c 	MEDIA_INFO(us).pba_to_lba[zone] = pba_to_lba;
zone              682 drivers/usb/storage/alauda.c static void alauda_ensure_map_for_zone(struct us_data *us, unsigned int zone)
zone              684 drivers/usb/storage/alauda.c 	if (MEDIA_INFO(us).lba_to_pba[zone] == NULL
zone              685 drivers/usb/storage/alauda.c 		|| MEDIA_INFO(us).pba_to_lba[zone] == NULL)
zone              686 drivers/usb/storage/alauda.c 		alauda_read_map(us, zone);
zone              814 drivers/usb/storage/alauda.c 	unsigned int zone = lba / uzonesize;
zone              816 drivers/usb/storage/alauda.c 	alauda_ensure_map_for_zone(us, zone);
zone              818 drivers/usb/storage/alauda.c 	pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];
zone              829 drivers/usb/storage/alauda.c 	new_pba = alauda_find_unused_pba(&MEDIA_INFO(us), zone);
zone              887 drivers/usb/storage/alauda.c 	new_pba_offset = new_pba - (zone * zonesize);
zone              888 drivers/usb/storage/alauda.c 	MEDIA_INFO(us).pba_to_lba[zone][new_pba_offset] = lba;
zone              889 drivers/usb/storage/alauda.c 	MEDIA_INFO(us).lba_to_pba[zone][lba_offset] = new_pba;
zone              893 drivers/usb/storage/alauda.c 		unsigned int pba_offset = pba - (zone * zonesize);
zone              897 drivers/usb/storage/alauda.c 		MEDIA_INFO(us).pba_to_lba[zone][pba_offset] = UNDEF;
zone              943 drivers/usb/storage/alauda.c 		unsigned int zone = lba / uzonesize; /* integer division */
zone              944 drivers/usb/storage/alauda.c 		unsigned int lba_offset = lba - (zone * uzonesize);
zone              947 drivers/usb/storage/alauda.c 		alauda_ensure_map_for_zone(us, zone);
zone              962 drivers/usb/storage/alauda.c 		pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];
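
alauda splits a logical block number into a zone index and an in-zone offset, then indexes the per-zone lba_to_pba/pba_to_lba tables; going the other way, an in-zone physical block becomes a global PBA by adding zone * zonesize. A sketch of just that arithmetic, with illustrative sizes:

#include <stdio.h>

int main(void)
{
	unsigned int uzonesize = 1000;	/* usable logical blocks per zone */
	unsigned int zonesize = 1024;	/* physical blocks per zone */
	unsigned int lba = 2345;

	unsigned int zone = lba / uzonesize;			/* which table */
	unsigned int lba_offset = lba - zone * uzonesize;	/* index into it */

	/* An in-zone physical block maps back to a global PBA: */
	unsigned int pba_offset = 17;
	unsigned int pba = zone * zonesize + pba_offset;

	printf("zone %u lba_offset %u pba %u\n", zone, lba_offset, pba);
	return 0;
}
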
zone              714 drivers/usb/storage/sddr55.c 		int zone = i / 1024;
zone              742 drivers/usb/storage/sddr55.c 		if (info->lba_to_pba[lba + zone * 1000] != NOT_ALLOCATED &&
zone              746 drivers/usb/storage/sddr55.c 			       lba + zone * 1000);
zone              753 drivers/usb/storage/sddr55.c 		info->lba_to_pba[lba + zone * 1000] = i;
zone              186 fs/adfs/map.c  static int scan_map(struct adfs_sb_info *asb, unsigned int zone,
zone              193 fs/adfs/map.c  	dm	= asb->s_map + zone;
zone              194 fs/adfs/map.c  	zone	= asb->s_map_size;
zone              195 fs/adfs/map.c  	dm_end	= asb->s_map + zone;
zone              206 fs/adfs/map.c  	} while (--zone > 0);
zone              229 fs/adfs/map.c  	unsigned int zone;
zone              232 fs/adfs/map.c  	zone = asb->s_map_size;
zone              236 fs/adfs/map.c  	} while (--zone > 0);
zone              244 fs/adfs/map.c  	unsigned int zone, mapoff;
zone              252 fs/adfs/map.c  		zone = asb->s_map_size >> 1;
zone              254 fs/adfs/map.c  		zone = frag_id / asb->s_ids_per_zone;
zone              256 fs/adfs/map.c  	if (zone >= asb->s_map_size)
zone              263 fs/adfs/map.c  	result = scan_map(asb, zone, frag_id, mapoff);
zone              280 fs/adfs/map.c  		   frag_id, zone, asb->s_map_size);
zone              329 fs/adfs/super.c 	int i, zone;
zone              346 fs/adfs/super.c 	for (zone = 0; zone < nzones; zone++, map_addr++) {
zone              347 fs/adfs/super.c 		dm[zone].dm_startbit = 0;
zone              348 fs/adfs/super.c 		dm[zone].dm_endbit   = zone_size;
zone              349 fs/adfs/super.c 		dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
zone              350 fs/adfs/super.c 		dm[zone].dm_bh       = sb_bread(sb, map_addr);
zone              352 fs/adfs/super.c 		if (!dm[zone].dm_bh) {
zone              359 fs/adfs/super.c 	i = zone - 1;
zone              371 fs/adfs/super.c 	while (--zone >= 0)
zone              372 fs/adfs/super.c 		brelse(dm[zone].dm_bh);
zone              229 fs/btrfs/reada.c 	struct reada_zone *zone;
zone              235 fs/btrfs/reada.c 	zone = NULL;
zone              237 fs/btrfs/reada.c 	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
zone              239 fs/btrfs/reada.c 	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
zone              240 fs/btrfs/reada.c 		kref_get(&zone->refcnt);
zone              242 fs/btrfs/reada.c 		return zone;
zone              255 fs/btrfs/reada.c 	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
zone              256 fs/btrfs/reada.c 	if (!zone)
zone              261 fs/btrfs/reada.c 		kfree(zone);
zone              265 fs/btrfs/reada.c 	zone->start = start;
zone              266 fs/btrfs/reada.c 	zone->end = end;
zone              267 fs/btrfs/reada.c 	INIT_LIST_HEAD(&zone->list);
zone              268 fs/btrfs/reada.c 	spin_lock_init(&zone->lock);
zone              269 fs/btrfs/reada.c 	zone->locked = 0;
zone              270 fs/btrfs/reada.c 	kref_init(&zone->refcnt);
zone              271 fs/btrfs/reada.c 	zone->elems = 0;
zone              272 fs/btrfs/reada.c 	zone->device = dev; /* our device always sits at index 0 */
zone              275 fs/btrfs/reada.c 		zone->devs[i] = bbio->stripes[i].dev;
zone              277 fs/btrfs/reada.c 	zone->ndevs = bbio->num_stripes;
zone              281 fs/btrfs/reada.c 				(unsigned long)(zone->end >> PAGE_SHIFT),
zone              282 fs/btrfs/reada.c 				zone);
zone              285 fs/btrfs/reada.c 		kfree(zone);
zone              286 fs/btrfs/reada.c 		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
zone              288 fs/btrfs/reada.c 		if (ret == 1 && logical >= zone->start && logical <= zone->end)
zone              289 fs/btrfs/reada.c 			kref_get(&zone->refcnt);
zone              291 fs/btrfs/reada.c 			zone = NULL;
zone              296 fs/btrfs/reada.c 	return zone;
zone              353 fs/btrfs/reada.c 		struct reada_zone *zone;
zone              361 fs/btrfs/reada.c 		zone = reada_find_zone(dev, logical, bbio);
zone              362 fs/btrfs/reada.c 		if (!zone)
zone              365 fs/btrfs/reada.c 		re->zones[re->nzones++] = zone;
zone              366 fs/btrfs/reada.c 		spin_lock(&zone->lock);
zone              367 fs/btrfs/reada.c 		if (!zone->elems)
zone              368 fs/btrfs/reada.c 			kref_get(&zone->refcnt);
zone              369 fs/btrfs/reada.c 		++zone->elems;
zone              370 fs/btrfs/reada.c 		spin_unlock(&zone->lock);
zone              372 fs/btrfs/reada.c 		kref_put(&zone->refcnt, reada_zone_release);
zone              459 fs/btrfs/reada.c 		struct reada_zone *zone;
zone              461 fs/btrfs/reada.c 		zone = re->zones[nzones];
zone              462 fs/btrfs/reada.c 		kref_get(&zone->refcnt);
zone              463 fs/btrfs/reada.c 		spin_lock(&zone->lock);
zone              464 fs/btrfs/reada.c 		--zone->elems;
zone              465 fs/btrfs/reada.c 		if (zone->elems == 0) {
zone              470 fs/btrfs/reada.c 			kref_put(&zone->refcnt, reada_zone_release);
zone              472 fs/btrfs/reada.c 		spin_unlock(&zone->lock);
zone              475 fs/btrfs/reada.c 		kref_put(&zone->refcnt, reada_zone_release);
zone              497 fs/btrfs/reada.c 		struct reada_zone *zone = re->zones[i];
zone              499 fs/btrfs/reada.c 		radix_tree_delete(&zone->device->reada_extents, index);
zone              505 fs/btrfs/reada.c 		struct reada_zone *zone = re->zones[i];
zone              507 fs/btrfs/reada.c 		kref_get(&zone->refcnt);
zone              508 fs/btrfs/reada.c 		spin_lock(&zone->lock);
zone              509 fs/btrfs/reada.c 		--zone->elems;
zone              510 fs/btrfs/reada.c 		if (zone->elems == 0) {
zone              513 fs/btrfs/reada.c 			kref_put(&zone->refcnt, reada_zone_release);
zone              515 fs/btrfs/reada.c 		spin_unlock(&zone->lock);
zone              518 fs/btrfs/reada.c 		kref_put(&zone->refcnt, reada_zone_release);
zone              527 fs/btrfs/reada.c 	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
zone              529 fs/btrfs/reada.c 	radix_tree_delete(&zone->device->reada_zones,
zone              530 fs/btrfs/reada.c 			  zone->end >> PAGE_SHIFT);
zone              532 fs/btrfs/reada.c 	kfree(zone);
zone              577 fs/btrfs/reada.c static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
zone              580 fs/btrfs/reada.c 	unsigned long index = zone->end >> PAGE_SHIFT;
zone              582 fs/btrfs/reada.c 	for (i = 0; i < zone->ndevs; ++i) {
zone              584 fs/btrfs/reada.c 		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
zone              585 fs/btrfs/reada.c 		if (peer && peer->device != zone->device)
zone              609 fs/btrfs/reada.c 		struct reada_zone *zone;
zone              612 fs/btrfs/reada.c 					     (void **)&zone, index, 1);
zone              615 fs/btrfs/reada.c 		index = (zone->end >> PAGE_SHIFT) + 1;
zone              616 fs/btrfs/reada.c 		if (zone->locked) {
zone              617 fs/btrfs/reada.c 			if (zone->elems > top_locked_elems) {
zone              618 fs/btrfs/reada.c 				top_locked_elems = zone->elems;
zone              619 fs/btrfs/reada.c 				top_locked_zone = zone;
zone              622 fs/btrfs/reada.c 			if (zone->elems > top_elems) {
zone              623 fs/btrfs/reada.c 				top_elems = zone->elems;
zone              624 fs/btrfs/reada.c 				top_zone = zone;
zone              846 fs/btrfs/reada.c 			struct reada_zone *zone;
zone              848 fs/btrfs/reada.c 						     (void **)&zone, index, 1);
zone              852 fs/btrfs/reada.c 				    zone->start, zone->end, zone->elems,
zone              853 fs/btrfs/reada.c 				    zone->locked);
zone              854 fs/btrfs/reada.c 			for (j = 0; j < zone->ndevs; ++j) {
zone              856 fs/btrfs/reada.c 					zone->devs[j]->devid);
zone              858 fs/btrfs/reada.c 			if (device->reada_curr_zone == zone)
zone              860 fs/btrfs/reada.c 					device->reada_next - zone->start);
zone              862 fs/btrfs/reada.c 			index = (zone->end >> PAGE_SHIFT) + 1;
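
The btrfs readahead code above refcounts each zone with a kref: lookups take a reference, elems dropping to zero releases one, and the final kref_put() runs reada_zone_release(), which unlinks the zone from the radix tree and frees it. A userspace sketch of that get/put-with-release shape, single-threaded with a plain int instead of atomics:

#include <stdio.h>
#include <stdlib.h>

struct ref { int count; };

struct reada_zone {
	struct ref refcnt;		/* first member, so the cast below is valid */
	unsigned long start, end;
};

static void zone_release(struct ref *r)
{
	struct reada_zone *zone = (struct reada_zone *)r;

	printf("release zone [%lu, %lu]\n", zone->start, zone->end);
	free(zone);	/* in btrfs this also deletes the radix-tree entry */
}

static void ref_get(struct ref *r) { r->count++; }

static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	if (--r->count == 0)
		release(r);
}

int main(void)
{
	struct reada_zone *zone = calloc(1, sizeof(*zone));

	if (!zone)
		return 1;
	zone->refcnt.count = 1;			/* kref_init() */
	zone->end = 4096;
	ref_get(&zone->refcnt);			/* lookup took a reference */
	ref_put(&zone->refcnt, zone_release);
	ref_put(&zone->refcnt, zone_release);	/* last put frees */
	return 0;
}
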
zone             2438 fs/f2fs/segment.c 		if (CURSEG_I(sbi, i)->zone == zoneno)
zone             2466 fs/f2fs/segment.c 	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
zone              301 fs/f2fs/segment.h 	unsigned int zone;			/* current zone number */
zone               48 fs/minix/bitmap.c 	unsigned long bit, zone;
zone               54 fs/minix/bitmap.c 	zone = block - sbi->s_firstdatazone + 1;
zone               55 fs/minix/bitmap.c 	bit = zone & ((1<<k) - 1);
zone               56 fs/minix/bitmap.c 	zone >>= k;
zone               57 fs/minix/bitmap.c 	if (zone >= sbi->s_zmap_blocks) {
zone               61 fs/minix/bitmap.c 	bh = sbi->s_zmap[zone];
zone              134 fs/ntfs/lcnalloc.c 		const NTFS_CLUSTER_ALLOCATION_ZONES zone,
zone              153 fs/ntfs/lcnalloc.c 			zone == MFT_ZONE ? "MFT" : "DATA");
zone              160 fs/ntfs/lcnalloc.c 	BUG_ON(zone < FIRST_ZONE);
zone              161 fs/ntfs/lcnalloc.c 	BUG_ON(zone > LAST_ZONE);
zone              188 fs/ntfs/lcnalloc.c 		if (zone == DATA_ZONE)
zone              199 fs/ntfs/lcnalloc.c 	} else if (zone == DATA_ZONE && zone_start >= vol->mft_zone_start &&
zone              207 fs/ntfs/lcnalloc.c 	} else if (zone == MFT_ZONE && (zone_start < vol->mft_zone_start ||
zone              218 fs/ntfs/lcnalloc.c 	if (zone == MFT_ZONE) {
zone              682 fs/ntfs/lcnalloc.c 		if (zone == MFT_ZONE || mft_zone_size <= 0) {
zone               31 fs/ntfs/lcnalloc.h 		const NTFS_CLUSTER_ALLOCATION_ZONES zone,
zone              188 fs/sysv/balloc.c 		sysv_zone_t zone;
zone              191 fs/sysv/balloc.c 		zone = 0;
zone              192 fs/sysv/balloc.c 		while (n && (zone = blocks[--n]) != 0)
zone              194 fs/sysv/balloc.c 		if (zone == 0)
zone              197 fs/sysv/balloc.c 		block = fs32_to_cpu(sbi, zone);
zone              120 fs/xfs/kmem.c  kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
zone              126 fs/xfs/kmem.c  	trace_kmem_zone_alloc(kmem_cache_size(zone), flags, _RET_IP_);
zone              128 fs/xfs/kmem.c  		ptr = kmem_cache_alloc(zone, lflags);
zone              103 fs/xfs/kmem.h  kmem_zone_free(kmem_zone_t *zone, void *ptr)
zone              105 fs/xfs/kmem.h  	kmem_cache_free(zone, ptr);
zone              109 fs/xfs/kmem.h  kmem_zone_destroy(kmem_zone_t *zone)
zone              111 fs/xfs/kmem.h  	kmem_cache_destroy(zone);
zone              117 fs/xfs/kmem.h  kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
zone              119 fs/xfs/kmem.h  	return kmem_zone_alloc(zone, flags | KM_ZERO);
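
The xfs kmem hits show kmem_zone_* as thin wrappers over kmem_cache_*, with kmem_zone_zalloc() merely or-ing in a zeroing flag before delegating. A sketch of that layering with the cache reduced to a recorded object size and a single flag bit:

#include <stdlib.h>
#include <string.h>

#define KM_ZERO 0x1u

/* Stand-in for a slab cache: just records the object size. */
typedef struct { size_t size; } kmem_zone_t;

static void *kmem_zone_alloc(kmem_zone_t *zone, unsigned int flags)
{
	void *ptr = malloc(zone->size);

	if (ptr && (flags & KM_ZERO))
		memset(ptr, 0, zone->size);
	return ptr;
}

static void *kmem_zone_zalloc(kmem_zone_t *zone, unsigned int flags)
{
	/* Same shape as the xfs wrapper: delegate with KM_ZERO added. */
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}

int main(void)
{
	kmem_zone_t zone = { .size = 64 };
	void *obj = kmem_zone_zalloc(&zone, 0);

	free(obj);
	return 0;
}
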
zone               12 fs/xfs/libxfs/xfs_da_btree.h struct zone;
zone               93 include/linux/compaction.h extern int fragmentation_index(struct zone *zone, unsigned int order);
zone               99 include/linux/compaction.h extern enum compact_result compaction_suitable(struct zone *zone, int order,
zone              102 include/linux/compaction.h extern void defer_compaction(struct zone *zone, int order);
zone              103 include/linux/compaction.h extern bool compaction_deferred(struct zone *zone, int order);
zone              104 include/linux/compaction.h extern void compaction_defer_reset(struct zone *zone, int order,
zone              106 include/linux/compaction.h extern bool compaction_restarting(struct zone *zone, int order);
zone              192 include/linux/compaction.h static inline enum compact_result compaction_suitable(struct zone *zone, int order,
zone              198 include/linux/compaction.h static inline void defer_compaction(struct zone *zone, int order)
zone              202 include/linux/compaction.h static inline bool compaction_deferred(struct zone *zone, int order)
zone               76 include/linux/cpuset.h static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
zone               81 include/linux/cpuset.h static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
zone              212 include/linux/cpuset.h static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
zone              217 include/linux/cpuset.h static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
zone              581 include/linux/gfp.h void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
zone              582 include/linux/gfp.h void drain_all_pages(struct zone *zone);
zone              583 include/linux/gfp.h void drain_local_pages(struct zone *zone);
zone              241 include/linux/memblock.h void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
zone              258 include/linux/memblock.h #define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
zone              260 include/linux/memblock.h 	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
zone              262 include/linux/memblock.h 	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
zone              276 include/linux/memblock.h #define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
zone              278 include/linux/memblock.h 	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
zone               11 include/linux/memory_hotplug.h struct zone;
zone               73 include/linux/memory_hotplug.h static inline unsigned zone_span_seqbegin(struct zone *zone)
zone               75 include/linux/memory_hotplug.h 	return read_seqbegin(&zone->span_seqlock);
zone               77 include/linux/memory_hotplug.h static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
zone               79 include/linux/memory_hotplug.h 	return read_seqretry(&zone->span_seqlock, iv);
zone               81 include/linux/memory_hotplug.h static inline void zone_span_writelock(struct zone *zone)
zone               83 include/linux/memory_hotplug.h 	write_seqlock(&zone->span_seqlock);
zone               85 include/linux/memory_hotplug.h static inline void zone_span_writeunlock(struct zone *zone)
zone               87 include/linux/memory_hotplug.h 	write_sequnlock(&zone->span_seqlock);
zone               89 include/linux/memory_hotplug.h static inline void zone_seqlock_init(struct zone *zone)
zone               91 include/linux/memory_hotplug.h 	seqlock_init(&zone->span_seqlock);
zone               93 include/linux/memory_hotplug.h extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
zone               94 include/linux/memory_hotplug.h extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
zone              232 include/linux/memory_hotplug.h extern void set_zone_contiguous(struct zone *zone);
zone              233 include/linux/memory_hotplug.h extern void clear_zone_contiguous(struct zone *zone);
zone              244 include/linux/memory_hotplug.h static inline unsigned zone_span_seqbegin(struct zone *zone)
zone              248 include/linux/memory_hotplug.h static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
zone              252 include/linux/memory_hotplug.h static inline void zone_span_writelock(struct zone *zone) {}
zone              253 include/linux/memory_hotplug.h static inline void zone_span_writeunlock(struct zone *zone) {}
zone              254 include/linux/memory_hotplug.h static inline void zone_seqlock_init(struct zone *zone) {}
zone              346 include/linux/memory_hotplug.h extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
zone              348 include/linux/memory_hotplug.h extern void remove_pfn_range_from_zone(struct zone *zone,
zone              361 include/linux/memory_hotplug.h extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
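
memory_hotplug.h above wraps zone->span_seqlock in zone_span_seqbegin()/zone_span_seqretry() so a reader can snapshot zone_start_pfn and spanned_pages and retry if a hotplug writer resized the zone mid-read. A single-threaded sketch of the retry loop; the sequence counter is simulated here, whereas the kernel uses read_seqbegin()/read_seqretry():

#include <stdio.h>

static unsigned int seq;	/* even = stable, odd = write in progress */
static unsigned long start_pfn = 100, spanned = 50;

static unsigned int span_seqbegin(void) { return seq; }
static int span_seqretry(unsigned int iv) { return seq != iv || (iv & 1); }

int main(void)
{
	unsigned long s, n;
	unsigned int iv;

	do {
		iv = span_seqbegin();
		s = start_pfn;		/* snapshot both fields... */
		n = spanned;
	} while (span_seqretry(iv));	/* ...retry if a writer interleaved */

	printf("zone spans [%lu, %lu)\n", s, s + n);
	return 0;
}
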
zone              972 include/linux/mm.h extern void memmap_init_zone_device(struct zone *, unsigned long,
zone             1254 include/linux/mm.h static inline struct zone *page_zone(const struct page *page)
zone             1277 include/linux/mm.h static inline void set_page_zone(struct page *page, enum zone_type zone)
zone             1280 include/linux/mm.h 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
zone             1289 include/linux/mm.h static inline void set_page_links(struct page *page, enum zone_type zone,
zone             1292 include/linux/mm.h 	set_page_zone(page, zone);
zone             2229 include/linux/mm.h extern void zone_pcp_update(struct zone *zone);
zone             2230 include/linux/mm.h extern void zone_pcp_reset(struct zone *zone);
zone              586 include/linux/mmzone.h static inline unsigned long zone_managed_pages(struct zone *zone)
zone              588 include/linux/mmzone.h 	return (unsigned long)atomic_long_read(&zone->managed_pages);
zone              591 include/linux/mmzone.h static inline unsigned long zone_end_pfn(const struct zone *zone)
zone              593 include/linux/mmzone.h 	return zone->zone_start_pfn + zone->spanned_pages;
zone              596 include/linux/mmzone.h static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
zone              598 include/linux/mmzone.h 	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
zone              601 include/linux/mmzone.h static inline bool zone_is_initialized(struct zone *zone)
zone              603 include/linux/mmzone.h 	return zone->initialized;
zone              606 include/linux/mmzone.h static inline bool zone_is_empty(struct zone *zone)
zone              608 include/linux/mmzone.h 	return zone->spanned_pages == 0;
zone              615 include/linux/mmzone.h static inline bool zone_intersects(struct zone *zone,
zone              618 include/linux/mmzone.h 	if (zone_is_empty(zone))
zone              620 include/linux/mmzone.h 	if (start_pfn >= zone_end_pfn(zone) ||
zone              621 include/linux/mmzone.h 	    start_pfn + nr_pages <= zone->zone_start_pfn)
zone              654 include/linux/mmzone.h 	struct zone *zone;	/* Pointer to actual zone */
zone              699 include/linux/mmzone.h 	struct zone node_zones[MAX_NR_ZONES];
zone              813 include/linux/mmzone.h void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
zone              815 include/linux/mmzone.h bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
zone              818 include/linux/mmzone.h bool zone_watermark_ok(struct zone *z, unsigned int order,
zone              821 include/linux/mmzone.h bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
zone              827 include/linux/mmzone.h extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
zone              864 include/linux/mmzone.h #define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
zone              872 include/linux/mmzone.h static inline bool managed_zone(struct zone *zone)
zone              874 include/linux/mmzone.h 	return zone_managed_pages(zone);
zone              878 include/linux/mmzone.h static inline bool populated_zone(struct zone *zone)
zone              880 include/linux/mmzone.h 	return zone->present_pages;
zone              884 include/linux/mmzone.h static inline int zone_to_nid(struct zone *zone)
zone              886 include/linux/mmzone.h 	return zone->node;
zone              889 include/linux/mmzone.h static inline void zone_set_nid(struct zone *zone, int nid)
zone              891 include/linux/mmzone.h 	zone->node = nid;
zone              894 include/linux/mmzone.h static inline int zone_to_nid(struct zone *zone)
zone              899 include/linux/mmzone.h static inline void zone_set_nid(struct zone *zone, int nid) {}
zone              931 include/linux/mmzone.h static inline int is_highmem(struct zone *zone)
zone              934 include/linux/mmzone.h 	return is_highmem_idx(zone_idx(zone));
zone              977 include/linux/mmzone.h extern struct zone *next_zone(struct zone *zone);
zone              994 include/linux/mmzone.h #define for_each_zone(zone)			        \
zone              995 include/linux/mmzone.h 	for (zone = (first_online_pgdat())->node_zones; \
zone              996 include/linux/mmzone.h 	     zone;					\
zone              997 include/linux/mmzone.h 	     zone = next_zone(zone))
zone              999 include/linux/mmzone.h #define for_each_populated_zone(zone)		        \
zone             1000 include/linux/mmzone.h 	for (zone = (first_online_pgdat())->node_zones; \
zone             1001 include/linux/mmzone.h 	     zone;					\
zone             1002 include/linux/mmzone.h 	     zone = next_zone(zone))			\
zone             1003 include/linux/mmzone.h 		if (!populated_zone(zone))		\
zone             1007 include/linux/mmzone.h static inline struct zone *zonelist_zone(struct zoneref *zoneref)
zone             1009 include/linux/mmzone.h 	return zoneref->zone;
zone             1019 include/linux/mmzone.h 	return zone_to_nid(zoneref->zone);
zone             1082 include/linux/mmzone.h #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
zone             1083 include/linux/mmzone.h 	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
zone             1084 include/linux/mmzone.h 		zone;							\
zone             1086 include/linux/mmzone.h 			zone = zonelist_zone(z))
zone             1088 include/linux/mmzone.h #define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
zone             1089 include/linux/mmzone.h 	for (zone = z->zone;	\
zone             1090 include/linux/mmzone.h 		zone;							\
zone             1092 include/linux/mmzone.h 			zone = zonelist_zone(z))
zone             1104 include/linux/mmzone.h #define for_each_zone_zonelist(zone, z, zlist, highidx) \
zone             1105 include/linux/mmzone.h 	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
zone             1444 include/linux/mmzone.h 					struct page *page, struct zone *zone);
zone             1447 include/linux/mmzone.h 					struct page *page, struct zone *zone)
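The iteration helpers above (next_zone(), for_each_zone(), for_each_populated_zone()) are the standard way kernel code walks every zone in the system. A minimal sketch of the pattern, modeled on the in-tree users listed further down in this index (e.g. mm/highmem.c); the helper name is hypothetical:

	/* Hypothetical helper: sum free pages across all populated zones. */
	static unsigned long all_zones_free_pages(void)
	{
		struct zone *zone;
		unsigned long pages = 0;

		for_each_populated_zone(zone)
			pages += zone_page_state(zone, NR_FREE_PAGES);

		return pages;
	}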
zone                6 include/linux/page-isolation.h static inline bool has_isolate_pageblock(struct zone *zone)
zone                8 include/linux/page-isolation.h 	return zone->nr_isolate_pageblock;
zone               19 include/linux/page-isolation.h static inline bool has_isolate_pageblock(struct zone *zone)
zone               36 include/linux/page-isolation.h bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
zone               39 include/linux/page-isolation.h int move_freepages_block(struct zone *zone, struct page *page,
zone               19 include/linux/page_owner.h 					pg_data_t *pgdat, struct zone *zone);
zone              362 include/linux/suspend.h extern void mark_free_pages(struct zone *zone);
zone              140 include/linux/swap.h struct zone;
zone              351 include/linux/swap.h extern unsigned long zone_reclaimable_pages(struct zone *zone);
zone              136 include/linux/vmstat.h static inline void zone_numa_state_add(long x, struct zone *zone,
zone              139 include/linux/vmstat.h 	atomic_long_add(x, &zone->vm_numa_stat[item]);
zone              150 include/linux/vmstat.h static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
zone              153 include/linux/vmstat.h 	long x = atomic_long_read(&zone->vm_numa_stat[item]);
zone              157 include/linux/vmstat.h 		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
zone              163 include/linux/vmstat.h static inline void zone_page_state_add(long x, struct zone *zone,
zone              166 include/linux/vmstat.h 	atomic_long_add(x, &zone->vm_stat[item]);
zone              197 include/linux/vmstat.h static inline unsigned long zone_page_state(struct zone *zone,
zone              200 include/linux/vmstat.h 	long x = atomic_long_read(&zone->vm_stat[item]);
zone              214 include/linux/vmstat.h static inline unsigned long zone_page_state_snapshot(struct zone *zone,
zone              217 include/linux/vmstat.h 	long x = atomic_long_read(&zone->vm_stat[item]);
zone              222 include/linux/vmstat.h 		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
zone              231 include/linux/vmstat.h extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
zone              243 include/linux/vmstat.h void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
zone              251 include/linux/vmstat.h void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
zone              260 include/linux/vmstat.h extern void __inc_zone_state(struct zone *, enum zone_stat_item);
zone              262 include/linux/vmstat.h extern void dec_zone_state(struct zone *, enum zone_stat_item);
zone              263 include/linux/vmstat.h extern void __dec_zone_state(struct zone *, enum zone_stat_item);
zone              274 include/linux/vmstat.h void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
zone              276 include/linux/vmstat.h int calculate_pressure_threshold(struct zone *zone);
zone              277 include/linux/vmstat.h int calculate_normal_threshold(struct zone *zone);
zone              279 include/linux/vmstat.h 				int (*calculate_pressure)(struct zone *));
zone              286 include/linux/vmstat.h static inline void __mod_zone_page_state(struct zone *zone,
zone              289 include/linux/vmstat.h 	zone_page_state_add(delta, zone, item);
zone              298 include/linux/vmstat.h static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
zone              300 include/linux/vmstat.h 	atomic_long_inc(&zone->vm_stat[item]);
zone              310 include/linux/vmstat.h static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
zone              312 include/linux/vmstat.h 	atomic_long_dec(&zone->vm_stat[item]);
zone              370 include/linux/vmstat.h static inline void drain_zonestat(struct zone *zone,
zone              374 include/linux/vmstat.h static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
zone              377 include/linux/vmstat.h 	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
zone              379 include/linux/vmstat.h 		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
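Note the two read paths above: zone_page_state() reads only the global atomic counter and can lag behind per-CPU deltas, while zone_page_state_snapshot() additionally folds in each CPU's vm_stat_diff at higher cost. A hedged sketch of choosing between them, loosely mirroring the zone_watermark_ok_safe() pattern; the helper name and threshold are hypothetical:

	/* Hypothetical check: cheap read first, exact read only near the mark. */
	static bool zone_free_below(struct zone *zone, unsigned long mark)
	{
		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

		if (free > 2 * mark)
			return false;	/* clearly fine, skip the per-cpu fold */

		return zone_page_state_snapshot(zone, NR_FREE_PAGES) < mark;
	}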
zone              198 include/net/flow_offload.h 			u16 zone;
zone               74 include/net/netfilter/nf_conntrack.h 	struct nf_conntrack_zone zone;
zone              233 include/net/netfilter/nf_conntrack.h 				   const struct nf_conntrack_zone *zone,
zone              310 include/net/netfilter/nf_conntrack.h 				 const struct nf_conntrack_zone *zone,
zone               50 include/net/netfilter/nf_conntrack_core.h 		      const struct nf_conntrack_zone *zone,
zone               26 include/net/netfilter/nf_conntrack_count.h 				const struct nf_conntrack_zone *zone);
zone               30 include/net/netfilter/nf_conntrack_count.h 		     const struct nf_conntrack_zone *zone);
zone               92 include/net/netfilter/nf_conntrack_expect.h 		    const struct nf_conntrack_zone *zone,
zone               97 include/net/netfilter/nf_conntrack_expect.h 		      const struct nf_conntrack_zone *zone,
zone              102 include/net/netfilter/nf_conntrack_expect.h 		       const struct nf_conntrack_zone *zone,
zone               12 include/net/netfilter/nf_conntrack_zones.h 	return &ct->zone;
zone               19 include/net/netfilter/nf_conntrack_zones.h nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
zone               21 include/net/netfilter/nf_conntrack_zones.h 	zone->id = id;
zone               22 include/net/netfilter/nf_conntrack_zones.h 	zone->flags = flags;
zone               23 include/net/netfilter/nf_conntrack_zones.h 	zone->dir = dir;
zone               25 include/net/netfilter/nf_conntrack_zones.h 	return zone;
zone               36 include/net/netfilter/nf_conntrack_zones.h 	if (tmpl->zone.flags & NF_CT_FLAG_MARK)
zone               37 include/net/netfilter/nf_conntrack_zones.h 		return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);
zone               43 include/net/netfilter/nf_conntrack_zones.h 				  const struct nf_conntrack_zone *zone)
zone               46 include/net/netfilter/nf_conntrack_zones.h 	ct->zone = *zone;
zone               50 include/net/netfilter/nf_conntrack_zones.h static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
zone               53 include/net/netfilter/nf_conntrack_zones.h 	return zone->dir & (1 << dir);
zone               56 include/net/netfilter/nf_conntrack_zones.h static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
zone               60 include/net/netfilter/nf_conntrack_zones.h 	return nf_ct_zone_matches_dir(zone, dir) ?
zone               61 include/net/netfilter/nf_conntrack_zones.h 	       zone->id : NF_CT_DEFAULT_ZONE_ID;
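A minimal usage sketch of the conntrack zone helpers above, assuming the standard constants from the same header family (NF_CT_DEFAULT_ZONE_DIR covers both directions; IP_CT_DIR_ORIGINAL is the forward direction); the demo function is hypothetical:

	static u16 zone_demo(void)
	{
		struct nf_conntrack_zone tmp;
		const struct nf_conntrack_zone *zone;

		/* Zone 42, valid in both directions, no flags. */
		zone = nf_ct_zone_init(&tmp, 42, NF_CT_DEFAULT_ZONE_DIR, 0);
		return nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);	/* == 42 */
	}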
zone               10 include/net/tc_act/tc_connmark.h 	u16 zone;
zone               14 include/net/tc_act/tc_ct.h 	u16 zone;
zone               41 include/net/tc_act/tc_ct.h 	return to_ct_params(a)->zone;
zone               13 include/net/tc_act/tc_ctinfo.h 	u16 zone;
zone             1504 include/soc/tegra/bpmp-abi.h 	uint32_t zone;
zone             1529 include/soc/tegra/bpmp-abi.h 	uint32_t zone;
zone             1541 include/soc/tegra/bpmp-abi.h 	uint32_t zone;
zone              168 include/sound/emux_synth.h 	struct snd_sf_zone *zone;	/* Zone assigned to this note */
zone              199 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone,
zone              203 include/trace/events/compaction.h 	TP_ARGS(zone, order, ret),
zone              213 include/trace/events/compaction.h 		__entry->nid = zone_to_nid(zone);
zone              214 include/trace/events/compaction.h 		__entry->idx = zone_idx(zone);
zone              228 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone,
zone              232 include/trace/events/compaction.h 	TP_ARGS(zone, order, ret)
zone              237 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone,
zone              241 include/trace/events/compaction.h 	TP_ARGS(zone, order, ret)
zone              246 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone, int order),
zone              248 include/trace/events/compaction.h 	TP_ARGS(zone, order),
zone              260 include/trace/events/compaction.h 		__entry->nid = zone_to_nid(zone);
zone              261 include/trace/events/compaction.h 		__entry->idx = zone_idx(zone);
zone              263 include/trace/events/compaction.h 		__entry->considered = zone->compact_considered;
zone              264 include/trace/events/compaction.h 		__entry->defer_shift = zone->compact_defer_shift;
zone              265 include/trace/events/compaction.h 		__entry->order_failed = zone->compact_order_failed;
zone              279 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone, int order),
zone              281 include/trace/events/compaction.h 	TP_ARGS(zone, order)
zone              286 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone, int order),
zone              288 include/trace/events/compaction.h 	TP_ARGS(zone, order)
zone              293 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone, int order),
zone              295 include/trace/events/compaction.h 	TP_ARGS(zone, order)
zone               56 include/trace/events/oom.h 		__entry->node = zone_to_nid(zoneref->zone);
zone               21 include/uapi/linux/netfilter/xt_CT.h 	__u16 zone;
zone               32 include/uapi/linux/netfilter/xt_CT.h 	__u16 zone;
zone               10 include/uapi/linux/tc_act/tc_connmark.h 	__u16 zone;
zone              267 include/uapi/linux/tipc.h static inline __u32 tipc_addr(unsigned int zone,
zone              271 include/uapi/linux/tipc.h 	return (zone << TIPC_ZONE_OFFSET) |
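tipc_addr() above packs zone, cluster and node into one __u32. With the uapi offsets (TIPC_ZONE_OFFSET == 24, TIPC_CLUSTER_OFFSET == 12), a worked example:

	__u32 addr = tipc_addr(1, 1, 10);
	/* (1 << 24) | (1 << 12) | 10 == 0x0100100a, i.e. address <1.1.10> */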
zone              419 kernel/crash_core.c 	VMCOREINFO_STRUCT_SIZE(zone);
zone              440 kernel/crash_core.c 	VMCOREINFO_OFFSET(zone, free_area);
zone              441 kernel/crash_core.c 	VMCOREINFO_OFFSET(zone, vm_stat);
zone              442 kernel/crash_core.c 	VMCOREINFO_OFFSET(zone, spanned_pages);
zone              448 kernel/crash_core.c 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
zone              150 kernel/power/power.h extern unsigned int snapshot_additional_pages(struct zone *zone);
zone              371 kernel/power/snapshot.c 	struct mem_zone_bm_rtree *zone;
zone              428 kernel/power/snapshot.c static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
zone              435 kernel/power/snapshot.c 	block_nr = zone->blocks;
zone              445 kernel/power/snapshot.c 	for (i = zone->levels; i < levels_needed; i++) {
zone              447 kernel/power/snapshot.c 					&zone->nodes);
zone              451 kernel/power/snapshot.c 		node->data[0] = (unsigned long)zone->rtree;
zone              452 kernel/power/snapshot.c 		zone->rtree = node;
zone              453 kernel/power/snapshot.c 		zone->levels += 1;
zone              457 kernel/power/snapshot.c 	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
zone              462 kernel/power/snapshot.c 	node = zone->rtree;
zone              463 kernel/power/snapshot.c 	dst = &zone->rtree;
zone              464 kernel/power/snapshot.c 	block_nr = zone->blocks;
zone              465 kernel/power/snapshot.c 	for (i = zone->levels; i > 0; i--) {
zone              470 kernel/power/snapshot.c 						&zone->nodes);
zone              482 kernel/power/snapshot.c 	zone->blocks += 1;
zone              488 kernel/power/snapshot.c static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
zone              504 kernel/power/snapshot.c 	struct mem_zone_bm_rtree *zone;
zone              509 kernel/power/snapshot.c 	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
zone              510 kernel/power/snapshot.c 	if (!zone)
zone              513 kernel/power/snapshot.c 	INIT_LIST_HEAD(&zone->nodes);
zone              514 kernel/power/snapshot.c 	INIT_LIST_HEAD(&zone->leaves);
zone              515 kernel/power/snapshot.c 	zone->start_pfn = start;
zone              516 kernel/power/snapshot.c 	zone->end_pfn = end;
zone              520 kernel/power/snapshot.c 		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
zone              521 kernel/power/snapshot.c 			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
zone              526 kernel/power/snapshot.c 	return zone;
zone              536 kernel/power/snapshot.c static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
zone              541 kernel/power/snapshot.c 	list_for_each_entry(node, &zone->nodes, list)
zone              544 kernel/power/snapshot.c 	list_for_each_entry(node, &zone->leaves, list)
zone              550 kernel/power/snapshot.c 	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
zone              552 kernel/power/snapshot.c 	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
zone              589 kernel/power/snapshot.c 	struct zone *zone;
zone              593 kernel/power/snapshot.c 	for_each_populated_zone(zone) {
zone              597 kernel/power/snapshot.c 		zone_start = zone->zone_start_pfn;
zone              598 kernel/power/snapshot.c 		zone_end = zone_end_pfn(zone);
zone              659 kernel/power/snapshot.c 		struct mem_zone_bm_rtree *zone;
zone              661 kernel/power/snapshot.c 		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
zone              663 kernel/power/snapshot.c 		if (!zone) {
zone              667 kernel/power/snapshot.c 		list_add_tail(&zone->list, &bm->zones);
zone              688 kernel/power/snapshot.c 	struct mem_zone_bm_rtree *zone;
zone              690 kernel/power/snapshot.c 	list_for_each_entry(zone, &bm->zones, list)
zone              691 kernel/power/snapshot.c 		free_zone_bm_rtree(zone, clear_nosave_free);
zone              710 kernel/power/snapshot.c 	struct mem_zone_bm_rtree *curr, *zone;
zone              714 kernel/power/snapshot.c 	zone = bm->cur.zone;
zone              716 kernel/power/snapshot.c 	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
zone              719 kernel/power/snapshot.c 	zone = NULL;
zone              724 kernel/power/snapshot.c 			zone = curr;
zone              729 kernel/power/snapshot.c 	if (!zone)
zone              744 kernel/power/snapshot.c 	if (zone == bm->cur.zone &&
zone              745 kernel/power/snapshot.c 	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
zone              748 kernel/power/snapshot.c 	node      = zone->rtree;
zone              749 kernel/power/snapshot.c 	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
zone              751 kernel/power/snapshot.c 	for (i = zone->levels; i > 0; i--) {
zone              762 kernel/power/snapshot.c 	bm->cur.zone = zone;
zone              764 kernel/power/snapshot.c 	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
zone              768 kernel/power/snapshot.c 	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
zone              847 kernel/power/snapshot.c 	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
zone              857 kernel/power/snapshot.c 	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
zone              858 kernel/power/snapshot.c 		bm->cur.zone = list_entry(bm->cur.zone->list.next,
zone              860 kernel/power/snapshot.c 		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
zone              888 kernel/power/snapshot.c 		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
zone              893 kernel/power/snapshot.c 			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
zone              914 kernel/power/snapshot.c static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
zone              918 kernel/power/snapshot.c 	list_for_each_entry(node, &zone->nodes, list)
zone              921 kernel/power/snapshot.c 	list_for_each_entry(node, &zone->leaves, list)
zone              927 kernel/power/snapshot.c 	struct mem_zone_bm_rtree *zone;
zone              930 kernel/power/snapshot.c 	list_for_each_entry(zone, &bm->zones, list)
zone              931 kernel/power/snapshot.c 		recycle_zone_bm_rtree(zone);
zone             1178 kernel/power/snapshot.c unsigned int snapshot_additional_pages(struct zone *zone)
zone             1182 kernel/power/snapshot.c 	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
zone             1201 kernel/power/snapshot.c 	struct zone *zone;
zone             1204 kernel/power/snapshot.c 	for_each_populated_zone(zone)
zone             1205 kernel/power/snapshot.c 		if (is_highmem(zone))
zone             1206 kernel/power/snapshot.c 			cnt += zone_page_state(zone, NR_FREE_PAGES);
zone             1219 kernel/power/snapshot.c static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
zone             1227 kernel/power/snapshot.c 	if (!page || page_zone(page) != zone)
zone             1249 kernel/power/snapshot.c 	struct zone *zone;
zone             1252 kernel/power/snapshot.c 	for_each_populated_zone(zone) {
zone             1255 kernel/power/snapshot.c 		if (!is_highmem(zone))
zone             1258 kernel/power/snapshot.c 		mark_free_pages(zone);
zone             1259 kernel/power/snapshot.c 		max_zone_pfn = zone_end_pfn(zone);
zone             1260 kernel/power/snapshot.c 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone             1261 kernel/power/snapshot.c 			if (saveable_highmem_page(zone, pfn))
zone             1267 kernel/power/snapshot.c static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
zone             1283 kernel/power/snapshot.c static struct page *saveable_page(struct zone *zone, unsigned long pfn)
zone             1291 kernel/power/snapshot.c 	if (!page || page_zone(page) != zone)
zone             1317 kernel/power/snapshot.c 	struct zone *zone;
zone             1321 kernel/power/snapshot.c 	for_each_populated_zone(zone) {
zone             1322 kernel/power/snapshot.c 		if (is_highmem(zone))
zone             1325 kernel/power/snapshot.c 		mark_free_pages(zone);
zone             1326 kernel/power/snapshot.c 		max_zone_pfn = zone_end_pfn(zone);
zone             1327 kernel/power/snapshot.c 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone             1328 kernel/power/snapshot.c 			if (saveable_page(zone, pfn))
zone             1366 kernel/power/snapshot.c static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
zone             1368 kernel/power/snapshot.c 	return is_highmem(zone) ?
zone             1369 kernel/power/snapshot.c 		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
zone             1401 kernel/power/snapshot.c #define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
zone             1413 kernel/power/snapshot.c 	struct zone *zone;
zone             1416 kernel/power/snapshot.c 	for_each_populated_zone(zone) {
zone             1419 kernel/power/snapshot.c 		mark_free_pages(zone);
zone             1420 kernel/power/snapshot.c 		max_zone_pfn = zone_end_pfn(zone);
zone             1421 kernel/power/snapshot.c 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone             1422 kernel/power/snapshot.c 			if (page_is_saveable(zone, pfn))
zone             1702 kernel/power/snapshot.c 	struct zone *zone;
zone             1734 kernel/power/snapshot.c 	for_each_populated_zone(zone) {
zone             1735 kernel/power/snapshot.c 		size += snapshot_additional_pages(zone);
zone             1736 kernel/power/snapshot.c 		if (is_highmem(zone))
zone             1737 kernel/power/snapshot.c 			highmem += zone_page_state(zone, NR_FREE_PAGES);
zone             1739 kernel/power/snapshot.c 			count += zone_page_state(zone, NR_FREE_PAGES);
zone             1877 kernel/power/snapshot.c 	struct zone *zone;
zone             1880 kernel/power/snapshot.c 	for_each_populated_zone(zone)
zone             1881 kernel/power/snapshot.c 		if (!is_highmem(zone))
zone             1882 kernel/power/snapshot.c 			free += zone_page_state(zone, NR_FREE_PAGES);
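The mem_zone_bm_rtree code above implements the hibernation memory bitmap: one radix tree per populated zone, indexed by (pfn - zone->start_pfn). The bitmap is driven through static wrappers inside snapshot.c; a hedged usage sketch of that internal API, assuming the in-file helper names (memory_bm_create() and friends) are unchanged:

	/* Hypothetical demonstration of snapshot.c's internal bitmap API. */
	static int bm_demo(unsigned long pfn)
	{
		struct memory_bitmap bm;

		if (memory_bm_create(&bm, GFP_KERNEL, PG_ANY))
			return -ENOMEM;

		memory_bm_set_bit(&bm, pfn);		/* mark a pfn for saving */
		if (memory_bm_test_bit(&bm, pfn))	/* query it back */
			memory_bm_clear_bit(&bm, pfn);

		memory_bm_free(&bm, PG_UNSAFE_CLEAR);
		return 0;
	}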
zone               23 lib/show_mem.c 			struct zone *zone = &pgdat->node_zones[zoneid];
zone               24 lib/show_mem.c 			if (!populated_zone(zone))
zone               27 lib/show_mem.c 			total += zone->present_pages;
zone               28 lib/show_mem.c 			reserved += zone->present_pages - zone_managed_pages(zone);
zone               31 lib/show_mem.c 				highmem += zone->present_pages;
zone              101 mm/cma.c       	struct zone *zone;
zone              111 mm/cma.c       	zone = page_zone(pfn_to_page(pfn));
zone              125 mm/cma.c       			if (page_zone(pfn_to_page(pfn)) != zone)
zone              142 mm/compaction.c void defer_compaction(struct zone *zone, int order)
zone              144 mm/compaction.c 	zone->compact_considered = 0;
zone              145 mm/compaction.c 	zone->compact_defer_shift++;
zone              147 mm/compaction.c 	if (order < zone->compact_order_failed)
zone              148 mm/compaction.c 		zone->compact_order_failed = order;
zone              150 mm/compaction.c 	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
zone              151 mm/compaction.c 		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
zone              153 mm/compaction.c 	trace_mm_compaction_defer_compaction(zone, order);
zone              157 mm/compaction.c bool compaction_deferred(struct zone *zone, int order)
zone              159 mm/compaction.c 	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
zone              161 mm/compaction.c 	if (order < zone->compact_order_failed)
zone              165 mm/compaction.c 	if (++zone->compact_considered > defer_limit)
zone              166 mm/compaction.c 		zone->compact_considered = defer_limit;
zone              168 mm/compaction.c 	if (zone->compact_considered >= defer_limit)
zone              171 mm/compaction.c 	trace_mm_compaction_deferred(zone, order);
zone              181 mm/compaction.c void compaction_defer_reset(struct zone *zone, int order,
zone              185 mm/compaction.c 		zone->compact_considered = 0;
zone              186 mm/compaction.c 		zone->compact_defer_shift = 0;
zone              188 mm/compaction.c 	if (order >= zone->compact_order_failed)
zone              189 mm/compaction.c 		zone->compact_order_failed = order + 1;
zone              191 mm/compaction.c 	trace_mm_compaction_defer_reset(zone, order);
zone              195 mm/compaction.c bool compaction_restarting(struct zone *zone, int order)
zone              197 mm/compaction.c 	if (order < zone->compact_order_failed)
zone              200 mm/compaction.c 	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
zone              201 mm/compaction.c 		zone->compact_considered >= 1UL << zone->compact_defer_shift;
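defer_compaction() and compaction_deferred() above implement exponential backoff for zones where compaction keeps failing. A worked trace of the counters, assuming COMPACT_MAX_DEFER_SHIFT is 6 as defined in include/linux/compaction.h:

	/*
	 * Each failure bumps compact_defer_shift, doubling how many
	 * later requests compaction_deferred() swallows:
	 *
	 *   compact_defer_shift:  1   2   3   4   5   6 (max)
	 *   defer_limit:          2   4   8  16  32  64
	 *
	 * A success at order >= compact_order_failed resets both counters
	 * via compaction_defer_reset(); compaction_restarting() is true
	 * only in the terminal stage (shift == max, considered >= limit).
	 */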
zone              214 mm/compaction.c static void reset_cached_positions(struct zone *zone)
zone              216 mm/compaction.c 	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
zone              217 mm/compaction.c 	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
zone              218 mm/compaction.c 	zone->compact_cached_free_pfn =
zone              219 mm/compaction.c 				pageblock_start_pfn(zone_end_pfn(zone) - 1);
zone              241 mm/compaction.c __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
zone              251 mm/compaction.c 	if (zone != page_zone(page))
zone              273 mm/compaction.c 	block_pfn = max(block_pfn, zone->zone_start_pfn);
zone              282 mm/compaction.c 	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
zone              317 mm/compaction.c static void __reset_isolation_suitable(struct zone *zone)
zone              319 mm/compaction.c 	unsigned long migrate_pfn = zone->zone_start_pfn;
zone              320 mm/compaction.c 	unsigned long free_pfn = zone_end_pfn(zone) - 1;
zone              326 mm/compaction.c 	if (!zone->compact_blockskip_flush)
zone              329 mm/compaction.c 	zone->compact_blockskip_flush = false;
zone              342 mm/compaction.c 		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
zone              346 mm/compaction.c 			zone->compact_init_migrate_pfn = reset_migrate;
zone              347 mm/compaction.c 			zone->compact_cached_migrate_pfn[0] = reset_migrate;
zone              348 mm/compaction.c 			zone->compact_cached_migrate_pfn[1] = reset_migrate;
zone              352 mm/compaction.c 		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
zone              356 mm/compaction.c 			zone->compact_init_free_pfn = reset_free;
zone              357 mm/compaction.c 			zone->compact_cached_free_pfn = reset_free;
zone              363 mm/compaction.c 		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
zone              364 mm/compaction.c 		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
zone              365 mm/compaction.c 		zone->compact_cached_free_pfn = free_pfn;
zone              374 mm/compaction.c 		struct zone *zone = &pgdat->node_zones[zoneid];
zone              375 mm/compaction.c 		if (!populated_zone(zone))
zone              379 mm/compaction.c 		if (zone->compact_blockskip_flush)
zone              380 mm/compaction.c 			__reset_isolation_suitable(zone);
zone              409 mm/compaction.c 	struct zone *zone = cc->zone;
zone              417 mm/compaction.c 	if (pfn > zone->compact_cached_migrate_pfn[0])
zone              418 mm/compaction.c 		zone->compact_cached_migrate_pfn[0] = pfn;
zone              420 mm/compaction.c 	    pfn > zone->compact_cached_migrate_pfn[1])
zone              421 mm/compaction.c 		zone->compact_cached_migrate_pfn[1] = pfn;
zone              431 mm/compaction.c 	struct zone *zone = cc->zone;
zone              442 mm/compaction.c 	if (pfn < zone->compact_cached_free_pfn)
zone              443 mm/compaction.c 		zone->compact_cached_free_pfn = pfn;
zone              566 mm/compaction.c 		    && compact_unlock_should_abort(&cc->zone->lock, flags,
zone              601 mm/compaction.c 			locked = compact_lock_irqsave(&cc->zone->lock,
zone              638 mm/compaction.c 		spin_unlock_irqrestore(&cc->zone->lock, flags);
zone              690 mm/compaction.c 	if (block_start_pfn < cc->zone->zone_start_pfn)
zone              691 mm/compaction.c 		block_start_pfn = cc->zone->zone_start_pfn;
zone              714 mm/compaction.c 					block_end_pfn, cc->zone))
zone              785 mm/compaction.c 	pg_data_t *pgdat = cc->zone->zone_pgdat;
zone             1093 mm/compaction.c 	if (block_start_pfn < cc->zone->zone_start_pfn)
zone             1094 mm/compaction.c 		block_start_pfn = cc->zone->zone_start_pfn;
zone             1104 mm/compaction.c 					block_end_pfn, cc->zone))
zone             1237 mm/compaction.c 	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
zone             1294 mm/compaction.c 	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
zone             1319 mm/compaction.c 		struct free_area *area = &cc->zone->free_area[order];
zone             1328 mm/compaction.c 		spin_lock_irqsave(&cc->zone->lock, flags);
zone             1384 mm/compaction.c 		spin_unlock_irqrestore(&cc->zone->lock, flags);
zone             1414 mm/compaction.c 	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
zone             1416 mm/compaction.c 		cc->zone->compact_cached_free_pfn = highest;
zone             1434 mm/compaction.c 	struct zone *zone = cc->zone;
zone             1462 mm/compaction.c 						zone_end_pfn(zone));
zone             1485 mm/compaction.c 									zone);
zone             1642 mm/compaction.c 	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
zone             1669 mm/compaction.c 	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
zone             1676 mm/compaction.c 		struct free_area *area = &cc->zone->free_area[order];
zone             1684 mm/compaction.c 		spin_lock_irqsave(&cc->zone->lock, flags);
zone             1721 mm/compaction.c 		spin_unlock_irqrestore(&cc->zone->lock, flags);
zone             1759 mm/compaction.c 	if (block_start_pfn < cc->zone->zone_start_pfn)
zone             1760 mm/compaction.c 		block_start_pfn = cc->zone->zone_start_pfn;
zone             1791 mm/compaction.c 						block_end_pfn, cc->zone);
zone             1858 mm/compaction.c 		reset_cached_positions(cc->zone);
zone             1867 mm/compaction.c 			cc->zone->compact_blockskip_flush = true;
zone             1890 mm/compaction.c 		struct free_area *area = &cc->zone->free_area[order];
zone             1944 mm/compaction.c 	trace_mm_compaction_finished(cc->zone, cc->order, ret);
zone             1958 mm/compaction.c static enum compact_result __compaction_suitable(struct zone *zone, int order,
zone             1968 mm/compaction.c 	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
zone             1973 mm/compaction.c 	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
zone             1992 mm/compaction.c 				low_wmark_pages(zone) : min_wmark_pages(zone);
zone             1994 mm/compaction.c 	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
zone             2001 mm/compaction.c enum compact_result compaction_suitable(struct zone *zone, int order,
zone             2008 mm/compaction.c 	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
zone             2009 mm/compaction.c 				    zone_page_state(zone, NR_FREE_PAGES));
zone             2027 mm/compaction.c 		fragindex = fragmentation_index(zone, order);
zone             2032 mm/compaction.c 	trace_mm_compaction_suitable(zone, order, ret);
zone             2042 mm/compaction.c 	struct zone *zone;
zone             2049 mm/compaction.c 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
zone             2060 mm/compaction.c 		available = zone_reclaimable_pages(zone) / order;
zone             2061 mm/compaction.c 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
zone             2062 mm/compaction.c 		compact_result = __compaction_suitable(zone, order, alloc_flags,
zone             2075 mm/compaction.c 	unsigned long start_pfn = cc->zone->zone_start_pfn;
zone             2076 mm/compaction.c 	unsigned long end_pfn = zone_end_pfn(cc->zone);
zone             2093 mm/compaction.c 	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
zone             2106 mm/compaction.c 	if (compaction_restarting(cc->zone, cc->order))
zone             2107 mm/compaction.c 		__reset_isolation_suitable(cc->zone);
zone             2120 mm/compaction.c 		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
zone             2121 mm/compaction.c 		cc->free_pfn = cc->zone->compact_cached_free_pfn;
zone             2124 mm/compaction.c 			cc->zone->compact_cached_free_pfn = cc->free_pfn;
zone             2128 mm/compaction.c 			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
zone             2129 mm/compaction.c 			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
zone             2132 mm/compaction.c 		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
zone             2147 mm/compaction.c 		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
zone             2181 mm/compaction.c 				cc->zone->compact_cached_migrate_pfn[1] =
zone             2182 mm/compaction.c 					cc->zone->compact_cached_migrate_pfn[0];
zone             2245 mm/compaction.c 				drain_local_pages(cc->zone);
zone             2275 mm/compaction.c 		if (free_pfn > cc->zone->compact_cached_free_pfn)
zone             2276 mm/compaction.c 			cc->zone->compact_cached_free_pfn = free_pfn;
zone             2288 mm/compaction.c static enum compact_result compact_zone_order(struct zone *zone, int order,
zone             2298 mm/compaction.c 		.zone = zone,
zone             2345 mm/compaction.c 	struct zone *zone;
zone             2358 mm/compaction.c 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
zone             2363 mm/compaction.c 					&& compaction_deferred(zone, order)) {
zone             2368 mm/compaction.c 		status = compact_zone_order(zone, order, gfp_mask, prio,
zone             2380 mm/compaction.c 			compaction_defer_reset(zone, order, false);
zone             2392 mm/compaction.c 			defer_compaction(zone, order);
zone             2413 mm/compaction.c 	struct zone *zone;
zone             2425 mm/compaction.c 		zone = &pgdat->node_zones[zoneid];
zone             2426 mm/compaction.c 		if (!populated_zone(zone))
zone             2429 mm/compaction.c 		cc.zone = zone;
zone             2503 mm/compaction.c 	struct zone *zone;
zone             2507 mm/compaction.c 		zone = &pgdat->node_zones[zoneid];
zone             2509 mm/compaction.c 		if (!populated_zone(zone))
zone             2512 mm/compaction.c 		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
zone             2527 mm/compaction.c 	struct zone *zone;
zone             2543 mm/compaction.c 		zone = &pgdat->node_zones[zoneid];
zone             2544 mm/compaction.c 		if (!populated_zone(zone))
zone             2547 mm/compaction.c 		if (compaction_deferred(zone, cc.order))
zone             2550 mm/compaction.c 		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
zone             2557 mm/compaction.c 		cc.zone = zone;
zone             2561 mm/compaction.c 			compaction_defer_reset(zone, cc.order, false);
zone             2569 mm/compaction.c 			drain_all_pages(zone);
zone             2575 mm/compaction.c 			defer_compaction(zone, cc.order);
zone              115 mm/highmem.c   	struct zone *zone;
zone              118 mm/highmem.c   	for_each_populated_zone(zone) {
zone              119 mm/highmem.c   		if (is_highmem(zone))
zone              120 mm/highmem.c   			pages += zone_page_state(zone, NR_FREE_PAGES);
zone             2969 mm/huge_memory.c 	struct zone *zone;
zone             2977 mm/huge_memory.c 	for_each_populated_zone(zone) {
zone             2978 mm/huge_memory.c 		max_zone_pfn = zone_end_pfn(zone);
zone             2979 mm/huge_memory.c 		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
zone             2987 mm/huge_memory.c 			if (zone != page_zone(page))
zone              899 mm/hugetlb.c   	struct zone *zone;
zone              907 mm/hugetlb.c   	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
zone              910 mm/hugetlb.c   		if (!cpuset_zone_allowed(zone, gfp_mask))
zone              916 mm/hugetlb.c   		if (zone_to_nid(zone) == node)
zone              918 mm/hugetlb.c   		node = zone_to_nid(zone);
zone             1081 mm/hugetlb.c   static bool pfn_range_valid_gigantic(struct zone *z,
zone             1108 mm/hugetlb.c   static bool zone_spans_last_pfn(const struct zone *zone,
zone             1112 mm/hugetlb.c   	return zone_spans_pfn(zone, last_pfn);
zone             1122 mm/hugetlb.c   	struct zone *zone;
zone             1126 mm/hugetlb.c   	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
zone             1127 mm/hugetlb.c   		spin_lock_irqsave(&zone->lock, flags);
zone             1129 mm/hugetlb.c   		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
zone             1130 mm/hugetlb.c   		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
zone             1131 mm/hugetlb.c   			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
zone             1139 mm/hugetlb.c   				spin_unlock_irqrestore(&zone->lock, flags);
zone             1143 mm/hugetlb.c   				spin_lock_irqsave(&zone->lock, flags);
zone             1148 mm/hugetlb.c   		spin_unlock_irqrestore(&zone->lock, flags);
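The gigantic-page scan above steps through each candidate zone in nr_pages-aligned increments; zone->lock is deliberately dropped around the allocation attempt because the allocator path takes that lock again itself. A condensed sketch of the loop, using the helpers listed above (alloc_contig_range() is the allocation entry point on this path):

	pfn = ALIGN(zone->zone_start_pfn, nr_pages);
	while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
		if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
			/* alloc_contig_range() acquires zone->lock itself. */
			spin_unlock_irqrestore(&zone->lock, flags);
			if (!alloc_contig_range(pfn, pfn + nr_pages,
						MIGRATE_MOVABLE, gfp_mask))
				return pfn_to_page(pfn);
			spin_lock_irqsave(&zone->lock, flags);
		}
		pfn += nr_pages;	/* stay nr_pages-aligned */
	}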
zone              148 mm/internal.h  				unsigned long end_pfn, struct zone *zone);
zone              151 mm/internal.h  				unsigned long end_pfn, struct zone *zone)
zone              153 mm/internal.h  	if (zone->contiguous)
zone              156 mm/internal.h  	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
zone              188 mm/internal.h  	struct zone *zone;
zone              500 mm/internal.h  unsigned long reclaim_clean_pages_from_list(struct zone *zone,
zone              573 mm/internal.h  void setup_zone_pageset(struct zone *zone);
zone             1707 mm/khugepaged.c 		struct zone *zone = page_zone(new_page);
zone             1709 mm/khugepaged.c 		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
zone             1711 mm/khugepaged.c 			__mod_node_page_state(zone->zone_pgdat,
zone             2126 mm/khugepaged.c 	struct zone *zone;
zone             2130 mm/khugepaged.c 	for_each_populated_zone(zone) {
zone             2135 mm/khugepaged.c 		if (zone_idx(zone) > gfp_zone(GFP_USER))
zone              872 mm/madvise.c   	struct zone *zone;
zone              926 mm/madvise.c   	for_each_populated_zone(zone)
zone              927 mm/madvise.c   		drain_all_pages(zone);
zone             1271 mm/memblock.c  __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
zone             1274 mm/memblock.c  	int zone_nid = zone_to_nid(zone);
zone             1290 mm/memblock.c  		if (zone->zone_start_pfn < epfn && spfn < epfn) {
zone             1292 mm/memblock.c  			if (zone_end_pfn(zone) <= spfn) {
zone             1298 mm/memblock.c  				*out_spfn = max(zone->zone_start_pfn, spfn);
zone             1300 mm/memblock.c  				*out_epfn = min(zone_end_pfn(zone), epfn);
zone             1950 mm/memblock.c  	struct zone *z;
zone              329 mm/memory_hotplug.c static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
zone              340 mm/memory_hotplug.c 		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
zone              350 mm/memory_hotplug.c static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
zone              365 mm/memory_hotplug.c 		if (zone && zone != page_zone(pfn_to_page(pfn)))
zone              374 mm/memory_hotplug.c static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
zone              377 mm/memory_hotplug.c 	unsigned long zone_start_pfn = zone->zone_start_pfn;
zone              378 mm/memory_hotplug.c 	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
zone              381 mm/memory_hotplug.c 	int nid = zone_to_nid(zone);
zone              383 mm/memory_hotplug.c 	zone_span_writelock(zone);
zone              391 mm/memory_hotplug.c 		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
zone              394 mm/memory_hotplug.c 			zone->zone_start_pfn = pfn;
zone              395 mm/memory_hotplug.c 			zone->spanned_pages = zone_end_pfn - pfn;
zone              404 mm/memory_hotplug.c 		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
zone              407 mm/memory_hotplug.c 			zone->spanned_pages = pfn - zone_start_pfn + 1;
zone              421 mm/memory_hotplug.c 		if (page_zone(pfn_to_page(pfn)) != zone)
zone              429 mm/memory_hotplug.c 		zone_span_writeunlock(zone);
zone              434 mm/memory_hotplug.c 	zone->zone_start_pfn = 0;
zone              435 mm/memory_hotplug.c 	zone->spanned_pages = 0;
zone              436 mm/memory_hotplug.c 	zone_span_writeunlock(zone);
zone              442 mm/memory_hotplug.c 	struct zone *zone;
zone              444 mm/memory_hotplug.c 	for (zone = pgdat->node_zones;
zone              445 mm/memory_hotplug.c 	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
zone              446 mm/memory_hotplug.c 		unsigned long zone_end_pfn = zone->zone_start_pfn +
zone              447 mm/memory_hotplug.c 					     zone->spanned_pages;
zone              450 mm/memory_hotplug.c 		if (!zone->spanned_pages)
zone              453 mm/memory_hotplug.c 			node_start_pfn = zone->zone_start_pfn;
zone              460 mm/memory_hotplug.c 		if (zone->zone_start_pfn < node_start_pfn)
zone              461 mm/memory_hotplug.c 			node_start_pfn = zone->zone_start_pfn;
zone              468 mm/memory_hotplug.c void __ref remove_pfn_range_from_zone(struct zone *zone,
zone              472 mm/memory_hotplug.c 	struct pglist_data *pgdat = zone->zone_pgdat;
zone              481 mm/memory_hotplug.c 	if (zone_idx(zone) == ZONE_DEVICE)
zone              485 mm/memory_hotplug.c 	clear_zone_contiguous(zone);
zone              487 mm/memory_hotplug.c 	pgdat_resize_lock(zone->zone_pgdat, &flags);
zone              488 mm/memory_hotplug.c 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
zone              490 mm/memory_hotplug.c 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
zone              492 mm/memory_hotplug.c 	set_zone_contiguous(zone);
zone              645 mm/memory_hotplug.c 	struct zone *zone, struct memory_notify *arg)
zone              647 mm/memory_hotplug.c 	int nid = zone_to_nid(zone);
zone              655 mm/memory_hotplug.c 	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
zone              658 mm/memory_hotplug.c 	if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
zone              675 mm/memory_hotplug.c static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
zone              678 mm/memory_hotplug.c 	unsigned long old_end_pfn = zone_end_pfn(zone);
zone              680 mm/memory_hotplug.c 	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
zone              681 mm/memory_hotplug.c 		zone->zone_start_pfn = start_pfn;
zone              683 mm/memory_hotplug.c 	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
zone              702 mm/memory_hotplug.c void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
zone              705 mm/memory_hotplug.c 	struct pglist_data *pgdat = zone->zone_pgdat;
zone              709 mm/memory_hotplug.c 	clear_zone_contiguous(zone);
zone              713 mm/memory_hotplug.c 	zone_span_writelock(zone);
zone              714 mm/memory_hotplug.c 	if (zone_is_empty(zone))
zone              715 mm/memory_hotplug.c 		init_currently_empty_zone(zone, start_pfn, nr_pages);
zone              716 mm/memory_hotplug.c 	resize_zone_range(zone, start_pfn, nr_pages);
zone              717 mm/memory_hotplug.c 	zone_span_writeunlock(zone);
zone              727 mm/memory_hotplug.c 	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
zone              730 mm/memory_hotplug.c 	set_zone_contiguous(zone);
zone              738 mm/memory_hotplug.c static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
zone              745 mm/memory_hotplug.c 		struct zone *zone = &pgdat->node_zones[zid];
zone              747 mm/memory_hotplug.c 		if (zone_intersects(zone, start_pfn, nr_pages))
zone              748 mm/memory_hotplug.c 			return zone;
zone              754 mm/memory_hotplug.c static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
zone              757 mm/memory_hotplug.c 	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
zone              759 mm/memory_hotplug.c 	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
zone              778 mm/memory_hotplug.c struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
zone              794 mm/memory_hotplug.c 	struct zone *zone;
zone              812 mm/memory_hotplug.c 	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
zone              813 mm/memory_hotplug.c 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
zone              817 mm/memory_hotplug.c 	node_states_check_changes_online(nr_pages, zone, &arg);
zone              829 mm/memory_hotplug.c 	if (!populated_zone(zone)) {
zone              831 mm/memory_hotplug.c 		setup_zone_pageset(zone);
zone              839 mm/memory_hotplug.c 			zone_pcp_reset(zone);
zone              843 mm/memory_hotplug.c 	zone->present_pages += onlined_pages;
zone              845 mm/memory_hotplug.c 	pgdat_resize_lock(zone->zone_pgdat, &flags);
zone              846 mm/memory_hotplug.c 	zone->zone_pgdat->node_present_pages += onlined_pages;
zone              847 mm/memory_hotplug.c 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
zone              849 mm/memory_hotplug.c 	shuffle_zone(zone);
zone              855 mm/memory_hotplug.c 		zone_pcp_update(zone);
zone              875 mm/memory_hotplug.c 	remove_pfn_range_from_zone(zone, pfn, nr_pages);
zone              883 mm/memory_hotplug.c 	struct zone *z;
zone             1172 mm/memory_hotplug.c 	struct zone *zone;
zone             1184 mm/memory_hotplug.c 	zone = page_zone(page);
zone             1186 mm/memory_hotplug.c 	if (!zone_spans_pfn(zone, pfn))
zone             1189 mm/memory_hotplug.c 	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
zone             1220 mm/memory_hotplug.c 	struct zone *zone = NULL;
zone             1239 mm/memory_hotplug.c 			if (zone && !zone_spans_pfn(zone, pfn + i))
zone             1242 mm/memory_hotplug.c 			if (zone && page_zone(page) != zone)
zone             1244 mm/memory_hotplug.c 			if (!zone)
zone             1246 mm/memory_hotplug.c 			zone = page_zone(page);
zone             1251 mm/memory_hotplug.c 	if (zone) {
zone             1422 mm/memory_hotplug.c 		struct zone *zone, struct memory_notify *arg)
zone             1424 mm/memory_hotplug.c 	struct pglist_data *pgdat = zone->zone_pgdat;
zone             1442 mm/memory_hotplug.c 	if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
zone             1443 mm/memory_hotplug.c 		arg->status_change_nid_normal = zone_to_nid(zone);
zone             1455 mm/memory_hotplug.c 	if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages)
zone             1456 mm/memory_hotplug.c 		arg->status_change_nid_high = zone_to_nid(zone);
zone             1472 mm/memory_hotplug.c 		arg->status_change_nid = zone_to_nid(zone);
zone             1495 mm/memory_hotplug.c 	struct zone *zone;
zone             1510 mm/memory_hotplug.c 	zone = page_zone(pfn_to_page(valid_start));
zone             1511 mm/memory_hotplug.c 	node = zone_to_nid(zone);
zone             1526 mm/memory_hotplug.c 	node_states_check_changes_offline(nr_pages, zone, &arg);
zone             1581 mm/memory_hotplug.c 	spin_lock_irqsave(&zone->lock, flags);
zone             1582 mm/memory_hotplug.c 	zone->nr_isolate_pageblock -= nr_isolate_pageblock;
zone             1583 mm/memory_hotplug.c 	spin_unlock_irqrestore(&zone->lock, flags);
zone             1587 mm/memory_hotplug.c 	zone->present_pages -= offlined_pages;
zone             1589 mm/memory_hotplug.c 	pgdat_resize_lock(zone->zone_pgdat, &flags);
zone             1590 mm/memory_hotplug.c 	zone->zone_pgdat->node_present_pages -= offlined_pages;
zone             1591 mm/memory_hotplug.c 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
zone             1595 mm/memory_hotplug.c 	if (!populated_zone(zone)) {
zone             1596 mm/memory_hotplug.c 		zone_pcp_reset(zone);
zone             1599 mm/memory_hotplug.c 		zone_pcp_update(zone);
zone             1611 mm/memory_hotplug.c 	remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
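zone_for_pfn_range() above selects the target zone when a memory block is onlined. A condensed sketch of the decision, assuming the MMOP_* online types from include/linux/memory_hotplug.h:

	if (online_type == MMOP_ONLINE_KERNEL)	/* force a kernel zone */
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
	if (online_type == MMOP_ONLINE_MOVABLE)	/* force ZONE_MOVABLE */
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	/* MMOP_ONLINE_KEEP: a kernel zone if the range fits there, else movable. */
	return default_zone_for_pfn(nid, start_pfn, nr_pages);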
zone             1772 mm/mempolicy.c static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
zone             1789 mm/mempolicy.c 	return zone >= dynamic_policy_zone;
zone             1875 mm/mempolicy.c 		return z->zone ? zone_to_nid(z->zone) : node;
zone             2428 mm/mempolicy.c 		polnid = zone_to_nid(z->zone);
zone              288 mm/memremap.c  		struct zone *zone;
zone              290 mm/memremap.c  		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
zone              291 mm/memremap.c  		move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
zone              401 mm/migrate.c   	struct zone *oldzone, *newzone;
zone             1889 mm/migrate.c   		struct zone *zone = pgdat->node_zones + z;
zone             1891 mm/migrate.c   		if (!populated_zone(zone))
zone             1895 mm/migrate.c   		if (!zone_watermark_ok(zone, 0,
zone             1896 mm/migrate.c   				       high_wmark_pages(zone) +
zone              290 mm/mlock.c     static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
zone              301 mm/mlock.c     	spin_lock_irq(&zone->zone_pgdat->lru_lock);
zone              327 mm/mlock.c     	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
zone              328 mm/mlock.c     	spin_unlock_irq(&zone->zone_pgdat->lru_lock);
zone              375 mm/mlock.c     			struct vm_area_struct *vma, struct zone *zone,
zone              404 mm/mlock.c     		if (!page || page_zone(page) != zone)
zone              455 mm/mlock.c     		struct zone *zone;
zone              489 mm/mlock.c     				zone = page_zone(page);
zone              498 mm/mlock.c     						zone, start, end);
zone              499 mm/mlock.c     				__munlock_pagevec(&pvec, zone);
zone               35 mm/mm_init.c   		struct zone *zone;
zone               47 mm/mm_init.c   			zone = &pgdat->node_zones[zoneid];
zone               48 mm/mm_init.c   			if (!populated_zone(zone))
zone               54 mm/mm_init.c   				zone->name);
zone               57 mm/mm_init.c   			for_each_zone_zonelist(zone, z, zonelist, zoneid)
zone               58 mm/mm_init.c   				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
zone               30 mm/mmzone.c    struct zone *next_zone(struct zone *zone)
zone               32 mm/mmzone.c    	pg_data_t *pgdat = zone->zone_pgdat;
zone               34 mm/mmzone.c    	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
zone               35 mm/mmzone.c    		zone++;
zone               39 mm/mmzone.c    			zone = pgdat->node_zones;
zone               41 mm/mmzone.c    			zone = NULL;
zone               43 mm/mmzone.c    	return zone;
zone               69 mm/mmzone.c    				(z->zone && !zref_in_nodemask(z, nodes)))
zone               77 mm/mmzone.c    					struct page *page, struct zone *zone)
zone               82 mm/mmzone.c    	if (page_zone(page) != zone)
zone              254 mm/oom_kill.c  	struct zone *zone;
zone              295 mm/oom_kill.c  	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
zone              297 mm/oom_kill.c  		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
zone              283 mm/page-writeback.c 		struct zone *zone = pgdat->node_zones + z;
zone              285 mm/page-writeback.c 		if (!populated_zone(zone))
zone              288 mm/page-writeback.c 		nr_pages += zone_page_state(zone, NR_FREE_PAGES);
zone              313 mm/page-writeback.c 			struct zone *z;
zone              103 mm/page_alloc.c 	struct zone *zone;
zone              561 mm/page_alloc.c static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
zone              569 mm/page_alloc.c 		seq = zone_span_seqbegin(zone);
zone              570 mm/page_alloc.c 		start_pfn = zone->zone_start_pfn;
zone              571 mm/page_alloc.c 		sp = zone->spanned_pages;
zone              572 mm/page_alloc.c 		if (!zone_spans_pfn(zone, pfn))
zone              574 mm/page_alloc.c 	} while (zone_span_seqretry(zone, seq));
zone              578 mm/page_alloc.c 			pfn, zone_to_nid(zone), zone->name,
zone              584 mm/page_alloc.c static int page_is_consistent(struct zone *zone, struct page *page)
zone              588 mm/page_alloc.c 	if (zone != page_zone(page))
zone              596 mm/page_alloc.c static int __maybe_unused bad_range(struct zone *zone, struct page *page)
zone              598 mm/page_alloc.c 	if (page_outside_zone_boundaries(zone, page))
zone              600 mm/page_alloc.c 	if (!page_is_consistent(zone, page))
zone              606 mm/page_alloc.c static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
zone              738 mm/page_alloc.c static inline bool set_page_guard(struct zone *zone, struct page *page,
zone              751 mm/page_alloc.c 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
zone              756 mm/page_alloc.c static inline void clear_page_guard(struct zone *zone, struct page *page,
zone              766 mm/page_alloc.c 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
zone              769 mm/page_alloc.c static inline bool set_page_guard(struct zone *zone, struct page *page,
zone              771 mm/page_alloc.c static inline void clear_page_guard(struct zone *zone, struct page *page,
zone              823 mm/page_alloc.c static inline struct capture_control *task_capc(struct zone *zone)
zone              830 mm/page_alloc.c 		capc->cc->zone == zone &&
zone              860 mm/page_alloc.c static inline struct capture_control *task_capc(struct zone *zone)
zone              899 mm/page_alloc.c 		struct zone *zone, unsigned int order,
zone              906 mm/page_alloc.c 	struct capture_control *capc = task_capc(zone);
zone              910 mm/page_alloc.c 	VM_BUG_ON(!zone_is_initialized(zone));
zone              915 mm/page_alloc.c 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
zone              918 mm/page_alloc.c 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
zone              923 mm/page_alloc.c 			__mod_zone_freepage_state(zone, -(1 << order),
zone              939 mm/page_alloc.c 			clear_page_guard(zone, buddy, order, migratetype);
zone              941 mm/page_alloc.c 			del_page_from_free_area(buddy, &zone->free_area[order]);
zone              956 mm/page_alloc.c 		if (unlikely(has_isolate_pageblock(zone))) {
zone              992 mm/page_alloc.c 			add_to_free_area_tail(page, &zone->free_area[order],
zone              999 mm/page_alloc.c 		add_to_free_area_random(page, &zone->free_area[order],
zone             1002 mm/page_alloc.c 		add_to_free_area(page, &zone->free_area[order], migratetype);
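__free_one_page() above merges a freed block with its buddy until no further merge is possible. The buddy pfn at a given order is found by flipping bit 'order' of the pfn (__find_buddy_pfn() in mm/internal.h); a worked example:

	/*
	 * pfn 0x1234 at order 2: buddy_pfn = 0x1234 ^ (1 << 2) = 0x1230.
	 * If that buddy is free at the same order, the merged block starts
	 * at pfn & buddy_pfn = 0x1230 and the search repeats at order 3.
	 */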
zone             1249 mm/page_alloc.c static void free_pcppages_bulk(struct zone *zone, int count,
zone             1305 mm/page_alloc.c 	spin_lock(&zone->lock);
zone             1306 mm/page_alloc.c 	isolated_pageblocks = has_isolate_pageblock(zone);
zone             1320 mm/page_alloc.c 		__free_one_page(page, page_to_pfn(page), zone, 0, mt);
zone             1323 mm/page_alloc.c 	spin_unlock(&zone->lock);
zone             1326 mm/page_alloc.c static void free_one_page(struct zone *zone,
zone             1331 mm/page_alloc.c 	spin_lock(&zone->lock);
zone             1332 mm/page_alloc.c 	if (unlikely(has_isolate_pageblock(zone) ||
zone             1336 mm/page_alloc.c 	__free_one_page(page, pfn, zone, order, migratetype);
zone             1337 mm/page_alloc.c 	spin_unlock(&zone->lock);
zone             1341 mm/page_alloc.c 				unsigned long zone, int nid)
zone             1344 mm/page_alloc.c 	set_page_links(page, zone, nid, pfn);
zone             1353 mm/page_alloc.c 	if (!is_highmem_idx(zone))
zone             1371 mm/page_alloc.c 		struct zone *zone = &pgdat->node_zones[zid];
zone             1373 mm/page_alloc.c 		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
zone             1516 mm/page_alloc.c 				     unsigned long end_pfn, struct zone *zone)
zone             1531 mm/page_alloc.c 	if (page_zone(start_page) != zone)
zone             1543 mm/page_alloc.c void set_zone_contiguous(struct zone *zone)
zone             1545 mm/page_alloc.c 	unsigned long block_start_pfn = zone->zone_start_pfn;
zone             1549 mm/page_alloc.c 	for (; block_start_pfn < zone_end_pfn(zone);
zone             1553 mm/page_alloc.c 		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
zone             1556 mm/page_alloc.c 					     block_end_pfn, zone))
zone             1562 mm/page_alloc.c 	zone->contiguous = true;
zone             1565 mm/page_alloc.c void clear_zone_contiguous(struct zone *zone)
zone             1567 mm/page_alloc.c 	zone->contiguous = false;
zone             1657 mm/page_alloc.c static unsigned long  __init deferred_init_pages(struct zone *zone,
zone             1662 mm/page_alloc.c 	int nid = zone_to_nid(zone);
zone             1664 mm/page_alloc.c 	int zid = zone_idx(zone);
zone             1690 mm/page_alloc.c deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
zone             1701 mm/page_alloc.c 	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
zone             1724 mm/page_alloc.c deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
zone             1733 mm/page_alloc.c 	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
zone             1740 mm/page_alloc.c 		nr_pages += deferred_init_pages(zone, *start_pfn, t);
zone             1751 mm/page_alloc.c 	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
zone             1775 mm/page_alloc.c 	struct zone *zone;
zone             1798 mm/page_alloc.c 		zone = pgdat->node_zones + zid;
zone             1799 mm/page_alloc.c 		if (first_init_pfn < zone_end_pfn(zone))
zone             1804 mm/page_alloc.c 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
zone             1814 mm/page_alloc.c 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
zone             1819 mm/page_alloc.c 	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
zone             1844 mm/page_alloc.c deferred_grow_zone(struct zone *zone, unsigned int order)
zone             1847 mm/page_alloc.c 	pg_data_t *pgdat = zone->zone_pgdat;
zone             1854 mm/page_alloc.c 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
zone             1880 mm/page_alloc.c 	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
zone             1897 mm/page_alloc.c 		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
zone             1921 mm/page_alloc.c _deferred_grow_zone(struct zone *zone, unsigned int order)
zone             1923 mm/page_alloc.c 	return deferred_grow_zone(zone, order);
zone             1930 mm/page_alloc.c 	struct zone *zone;
zone             1949 mm/page_alloc.c 	for_each_populated_zone(zone)
zone             1950 mm/page_alloc.c 		zone_pcp_update(zone);
zone             1968 mm/page_alloc.c 	for_each_populated_zone(zone)
zone             1969 mm/page_alloc.c 		set_zone_contiguous(zone);
zone             2017 mm/page_alloc.c static inline void expand(struct zone *zone, struct page *page,
zone             2027 mm/page_alloc.c 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
zone             2035 mm/page_alloc.c 		if (set_page_guard(zone, &page[size], high, migratetype))
zone             2183 mm/page_alloc.c struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
zone             2192 mm/page_alloc.c 		area = &(zone->free_area[current_order]);
zone             2197 mm/page_alloc.c 		expand(zone, page, order, current_order, area, migratetype);
zone             2223 mm/page_alloc.c static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
zone             2226 mm/page_alloc.c 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
zone             2229 mm/page_alloc.c static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
zone             2238 mm/page_alloc.c static int move_freepages(struct zone *zone,
zone             2267 mm/page_alloc.c 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
zone             2268 mm/page_alloc.c 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
zone             2271 mm/page_alloc.c 		move_to_free_area(page, &zone->free_area[order], migratetype);
zone             2279 mm/page_alloc.c int move_freepages_block(struct zone *zone, struct page *page,
zone             2295 mm/page_alloc.c 	if (!zone_spans_pfn(zone, start_pfn))
zone             2297 mm/page_alloc.c 	if (!zone_spans_pfn(zone, end_pfn))
zone             2300 mm/page_alloc.c 	return move_freepages(zone, start_page, end_page, migratetype,
zone             2348 mm/page_alloc.c static inline void boost_watermark(struct zone *zone)
zone             2360 mm/page_alloc.c 	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
zone             2363 mm/page_alloc.c 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
zone             2379 mm/page_alloc.c 	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
zone             2391 mm/page_alloc.c static void steal_suitable_fallback(struct zone *zone, struct page *page,
zone             2419 mm/page_alloc.c 	boost_watermark(zone);
zone             2421 mm/page_alloc.c 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
zone             2427 mm/page_alloc.c 	free_pages = move_freepages_block(zone, page, start_type,
zone             2466 mm/page_alloc.c 	area = &zone->free_area[current_order];
zone             2511 mm/page_alloc.c static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
zone             2521 mm/page_alloc.c 	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
zone             2522 mm/page_alloc.c 	if (zone->nr_reserved_highatomic >= max_managed)
zone             2525 mm/page_alloc.c 	spin_lock_irqsave(&zone->lock, flags);
zone             2528 mm/page_alloc.c 	if (zone->nr_reserved_highatomic >= max_managed)
zone             2535 mm/page_alloc.c 		zone->nr_reserved_highatomic += pageblock_nr_pages;
zone             2537 mm/page_alloc.c 		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
zone             2541 mm/page_alloc.c 	spin_unlock_irqrestore(&zone->lock, flags);
zone             2559 mm/page_alloc.c 	struct zone *zone;
zone             2564 mm/page_alloc.c 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
zone             2570 mm/page_alloc.c 		if (!force && zone->nr_reserved_highatomic <=
zone             2574 mm/page_alloc.c 		spin_lock_irqsave(&zone->lock, flags);
zone             2576 mm/page_alloc.c 			struct free_area *area = &(zone->free_area[order]);
zone             2597 mm/page_alloc.c 				zone->nr_reserved_highatomic -= min(
zone             2599 mm/page_alloc.c 						zone->nr_reserved_highatomic);
zone             2612 mm/page_alloc.c 			ret = move_freepages_block(zone, page, ac->migratetype,
zone             2615 mm/page_alloc.c 				spin_unlock_irqrestore(&zone->lock, flags);
zone             2619 mm/page_alloc.c 		spin_unlock_irqrestore(&zone->lock, flags);
zone             2636 mm/page_alloc.c __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
zone             2661 mm/page_alloc.c 		area = &(zone->free_area[current_order]);
zone             2687 mm/page_alloc.c 		area = &(zone->free_area[current_order]);
zone             2703 mm/page_alloc.c 	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
zone             2718 mm/page_alloc.c __rmqueue(struct zone *zone, unsigned int order, int migratetype,
zone             2724 mm/page_alloc.c 	page = __rmqueue_smallest(zone, order, migratetype);
zone             2727 mm/page_alloc.c 			page = __rmqueue_cma_fallback(zone, order);
zone             2729 mm/page_alloc.c 		if (!page && __rmqueue_fallback(zone, order, migratetype,
zone             2743 mm/page_alloc.c static int rmqueue_bulk(struct zone *zone, unsigned int order,
zone             2749 mm/page_alloc.c 	spin_lock(&zone->lock);
zone             2751 mm/page_alloc.c 		struct page *page = __rmqueue(zone, order, migratetype,
zone             2772 mm/page_alloc.c 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
zone             2782 mm/page_alloc.c 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
zone             2783 mm/page_alloc.c 	spin_unlock(&zone->lock);
zone             2796 mm/page_alloc.c void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
zone             2805 mm/page_alloc.c 		free_pcppages_bulk(zone, to_drain, pcp);
zone             2817 mm/page_alloc.c static void drain_pages_zone(unsigned int cpu, struct zone *zone)
zone             2824 mm/page_alloc.c 	pset = per_cpu_ptr(zone->pageset, cpu);
zone             2828 mm/page_alloc.c 		free_pcppages_bulk(zone, pcp->count, pcp);
zone             2841 mm/page_alloc.c 	struct zone *zone;
zone             2843 mm/page_alloc.c 	for_each_populated_zone(zone) {
zone             2844 mm/page_alloc.c 		drain_pages_zone(cpu, zone);
zone             2854 mm/page_alloc.c void drain_local_pages(struct zone *zone)
zone             2858 mm/page_alloc.c 	if (zone)
zone             2859 mm/page_alloc.c 		drain_pages_zone(cpu, zone);
zone             2878 mm/page_alloc.c 	drain_local_pages(drain->zone);
zone             2889 mm/page_alloc.c void drain_all_pages(struct zone *zone)
zone             2912 mm/page_alloc.c 		if (!zone)
zone             2925 mm/page_alloc.c 		struct zone *z;
zone             2928 mm/page_alloc.c 		if (zone) {
zone             2929 mm/page_alloc.c 			pcp = per_cpu_ptr(zone->pageset, cpu);
zone             2951 mm/page_alloc.c 		drain->zone = zone;
zone             2968 mm/page_alloc.c void mark_free_pages(struct zone *zone)
zone             2975 mm/page_alloc.c 	if (zone_is_empty(zone))
zone             2978 mm/page_alloc.c 	spin_lock_irqsave(&zone->lock, flags);
zone             2980 mm/page_alloc.c 	max_zone_pfn = zone_end_pfn(zone);
zone             2981 mm/page_alloc.c 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
zone             2990 mm/page_alloc.c 			if (page_zone(page) != zone)
zone             2999 mm/page_alloc.c 				&zone->free_area[order].free_list[t], lru) {
zone             3012 mm/page_alloc.c 	spin_unlock_irqrestore(&zone->lock, flags);
zone             3030 mm/page_alloc.c 	struct zone *zone = page_zone(page);
zone             3046 mm/page_alloc.c 			free_one_page(zone, page, pfn, 0, migratetype);
zone             3052 mm/page_alloc.c 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
zone             3057 mm/page_alloc.c 		free_pcppages_bulk(zone, batch, pcp);
zone             3140 mm/page_alloc.c 	struct zone *zone;
zone             3145 mm/page_alloc.c 	zone = page_zone(page);
zone             3155 mm/page_alloc.c 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
zone             3156 mm/page_alloc.c 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
zone             3159 mm/page_alloc.c 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
zone             3190 mm/page_alloc.c static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
zone             3213 mm/page_alloc.c static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
zone             3222 mm/page_alloc.c 			pcp->count += rmqueue_bulk(zone, 0,
zone             3238 mm/page_alloc.c static struct page *rmqueue_pcplist(struct zone *preferred_zone,
zone             3239 mm/page_alloc.c 			struct zone *zone, gfp_t gfp_flags,
zone             3248 mm/page_alloc.c 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
zone             3250 mm/page_alloc.c 	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
zone             3253 mm/page_alloc.c 		zone_statistics(preferred_zone, zone);
zone             3263 mm/page_alloc.c struct page *rmqueue(struct zone *preferred_zone,
zone             3264 mm/page_alloc.c 			struct zone *zone, unsigned int order,
zone             3272 mm/page_alloc.c 		page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
zone             3282 mm/page_alloc.c 	spin_lock_irqsave(&zone->lock, flags);
zone             3287 mm/page_alloc.c 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
zone             3292 mm/page_alloc.c 			page = __rmqueue(zone, order, migratetype, alloc_flags);
zone             3294 mm/page_alloc.c 	spin_unlock(&zone->lock);
zone             3297 mm/page_alloc.c 	__mod_zone_freepage_state(zone, -(1 << order),
zone             3301 mm/page_alloc.c 	zone_statistics(preferred_zone, zone);
zone             3306 mm/page_alloc.c 	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
zone             3307 mm/page_alloc.c 		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
zone             3308 mm/page_alloc.c 		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
zone             3311 mm/page_alloc.c 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
zone             3399 mm/page_alloc.c bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
zone             3478 mm/page_alloc.c bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
zone             3485 mm/page_alloc.c static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
zone             3511 mm/page_alloc.c bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
zone             3524 mm/page_alloc.c static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
zone             3526 mm/page_alloc.c 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
zone             3530 mm/page_alloc.c static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
zone             3545 mm/page_alloc.c alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
zone             3553 mm/page_alloc.c 	if (!zone)
zone             3556 mm/page_alloc.c 	if (zone_idx(zone) != ZONE_NORMAL)
zone             3565 mm/page_alloc.c 	if (nr_online_nodes > 1 && !populated_zone(--zone))
zone             3582 mm/page_alloc.c 	struct zone *zone;
zone             3593 mm/page_alloc.c 	for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
zone             3600 mm/page_alloc.c 			!__cpuset_zone_allowed(zone, gfp_mask))
zone             3622 mm/page_alloc.c 			if (last_pgdat_dirty_limit == zone->zone_pgdat)
zone             3625 mm/page_alloc.c 			if (!node_dirty_ok(zone->zone_pgdat)) {
zone             3626 mm/page_alloc.c 				last_pgdat_dirty_limit = zone->zone_pgdat;
zone             3632 mm/page_alloc.c 		    zone != ac->preferred_zoneref->zone) {
zone             3640 mm/page_alloc.c 			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
zone             3641 mm/page_alloc.c 			if (zone_to_nid(zone) != local_nid) {
zone             3647 mm/page_alloc.c 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
zone             3648 mm/page_alloc.c 		if (!zone_watermark_fast(zone, order, mark,
zone             3658 mm/page_alloc.c 				if (_deferred_grow_zone(zone, order))
zone             3668 mm/page_alloc.c 			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
zone             3671 mm/page_alloc.c 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
zone             3681 mm/page_alloc.c 				if (zone_watermark_ok(zone, order, mark,
zone             3690 mm/page_alloc.c 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
zone             3700 mm/page_alloc.c 				reserve_highatomic_pageblock(page, zone, order);
zone             3707 mm/page_alloc.c 				if (_deferred_grow_zone(zone, order))
zone             3920 mm/page_alloc.c 		struct zone *zone = page_zone(page);
zone             3922 mm/page_alloc.c 		zone->compact_blockskip_flush = false;
zone             3923 mm/page_alloc.c 		compaction_defer_reset(zone, order, true);
zone             4032 mm/page_alloc.c 	struct zone *zone;
zone             4044 mm/page_alloc.c 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
zone             4046 mm/page_alloc.c 		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
zone             4169 mm/page_alloc.c 	struct zone *zone;
zone             4173 mm/page_alloc.c 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx,
zone             4175 mm/page_alloc.c 		if (last_pgdat != zone->zone_pgdat)
zone             4176 mm/page_alloc.c 			wakeup_kswapd(zone, gfp_mask, order, high_zoneidx);
zone             4177 mm/page_alloc.c 		last_pgdat = zone->zone_pgdat;
zone             4279 mm/page_alloc.c 	struct zone *zone;
zone             4308 mm/page_alloc.c 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
zone             4312 mm/page_alloc.c 		unsigned long min_wmark = min_wmark_pages(zone);
zone             4315 mm/page_alloc.c 		available = reclaimable = zone_reclaimable_pages(zone);
zone             4316 mm/page_alloc.c 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
zone             4322 mm/page_alloc.c 		wmark = __zone_watermark_ok(zone, order, min_wmark,
zone             4336 mm/page_alloc.c 				write_pending = zone_page_state_snapshot(zone,
zone             4443 mm/page_alloc.c 	if (!ac->preferred_zoneref->zone)
zone             4751 mm/page_alloc.c 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
zone             5047 mm/page_alloc.c 	struct zone *zone;
zone             5054 mm/page_alloc.c 	for_each_zone_zonelist(zone, z, zonelist, offset) {
zone             5055 mm/page_alloc.c 		unsigned long size = zone_managed_pages(zone);
zone             5056 mm/page_alloc.c 		unsigned long high = high_wmark_pages(zone);
zone             5092 mm/page_alloc.c static inline void show_node(struct zone *zone)
zone             5095 mm/page_alloc.c 		printk("Node %d ", zone_to_nid(zone));
zone             5105 mm/page_alloc.c 	struct zone *zone;
zone             5111 mm/page_alloc.c 	for_each_zone(zone)
zone             5112 mm/page_alloc.c 		wmark_low += low_wmark_pages(zone);
zone             5173 mm/page_alloc.c 		struct zone *zone = &pgdat->node_zones[zone_type];
zone             5175 mm/page_alloc.c 		if (is_highmem(zone)) {
zone             5176 mm/page_alloc.c 			managed_highpages += zone_managed_pages(zone);
zone             5177 mm/page_alloc.c 			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
zone             5252 mm/page_alloc.c 	struct zone *zone;
zone             5255 mm/page_alloc.c 	for_each_populated_zone(zone) {
zone             5256 mm/page_alloc.c 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
zone             5260 mm/page_alloc.c 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
zone             5338 mm/page_alloc.c 	for_each_populated_zone(zone) {
zone             5341 mm/page_alloc.c 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
zone             5346 mm/page_alloc.c 			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
zone             5348 mm/page_alloc.c 		show_node(zone);
zone             5371 mm/page_alloc.c 			zone->name,
zone             5372 mm/page_alloc.c 			K(zone_page_state(zone, NR_FREE_PAGES)),
zone             5373 mm/page_alloc.c 			K(min_wmark_pages(zone)),
zone             5374 mm/page_alloc.c 			K(low_wmark_pages(zone)),
zone             5375 mm/page_alloc.c 			K(high_wmark_pages(zone)),
zone             5376 mm/page_alloc.c 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
zone             5377 mm/page_alloc.c 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
zone             5378 mm/page_alloc.c 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
zone             5379 mm/page_alloc.c 			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
zone             5380 mm/page_alloc.c 			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
zone             5381 mm/page_alloc.c 			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
zone             5382 mm/page_alloc.c 			K(zone->present_pages),
zone             5383 mm/page_alloc.c 			K(zone_managed_pages(zone)),
zone             5384 mm/page_alloc.c 			K(zone_page_state(zone, NR_MLOCK)),
zone             5385 mm/page_alloc.c 			zone_page_state(zone, NR_KERNEL_STACK_KB),
zone             5386 mm/page_alloc.c 			K(zone_page_state(zone, NR_PAGETABLE)),
zone             5387 mm/page_alloc.c 			K(zone_page_state(zone, NR_BOUNCE)),
zone             5389 mm/page_alloc.c 			K(this_cpu_read(zone->pageset->pcp.count)),
zone             5390 mm/page_alloc.c 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
zone             5393 mm/page_alloc.c 			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
zone             5397 mm/page_alloc.c 	for_each_populated_zone(zone) {
zone             5402 mm/page_alloc.c 		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
zone             5404 mm/page_alloc.c 		show_node(zone);
zone             5405 mm/page_alloc.c 		printk(KERN_CONT "%s: ", zone->name);
zone             5407 mm/page_alloc.c 		spin_lock_irqsave(&zone->lock, flags);
zone             5409 mm/page_alloc.c 			struct free_area *area = &zone->free_area[order];
zone             5421 mm/page_alloc.c 		spin_unlock_irqrestore(&zone->lock, flags);
zone             5438 mm/page_alloc.c static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
zone             5440 mm/page_alloc.c 	zoneref->zone = zone;
zone             5441 mm/page_alloc.c 	zoneref->zone_idx = zone_idx(zone);
zone             5451 mm/page_alloc.c 	struct zone *zone;
zone             5457 mm/page_alloc.c 		zone = pgdat->node_zones + zone_type;
zone             5458 mm/page_alloc.c 		if (managed_zone(zone)) {
zone             5459 mm/page_alloc.c 			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
zone             5603 mm/page_alloc.c 	zonerefs->zone = NULL;
zone             5618 mm/page_alloc.c 	zonerefs->zone = NULL;
zone             5676 mm/page_alloc.c 	return zone_to_nid(z->zone);
zone             5717 mm/page_alloc.c 	zonerefs->zone = NULL;
zone             5850 mm/page_alloc.c overlap_memmap_init(unsigned long zone, unsigned long *pfn)
zone             5855 mm/page_alloc.c 	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
zone             5877 mm/page_alloc.c void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
zone             5895 mm/page_alloc.c 	if (zone == ZONE_DEVICE) {
zone             5915 mm/page_alloc.c 			if (overlap_memmap_init(zone, &pfn))
zone             5922 mm/page_alloc.c 		__init_single_page(page, pfn, zone, nid);
zone             5946 mm/page_alloc.c void __ref memmap_init_zone_device(struct zone *zone,
zone             5952 mm/page_alloc.c 	struct pglist_data *pgdat = zone->zone_pgdat;
zone             5954 mm/page_alloc.c 	unsigned long zone_idx = zone_idx(zone);
zone             5958 mm/page_alloc.c 	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
zone             6019 mm/page_alloc.c static void __meminit zone_init_free_lists(struct zone *zone)
zone             6023 mm/page_alloc.c 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
zone             6024 mm/page_alloc.c 		zone->free_area[order].nr_free = 0;
zone             6029 mm/page_alloc.c 				  unsigned long zone, unsigned long start_pfn)
zone             6031 mm/page_alloc.c 	memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
zone             6034 mm/page_alloc.c static int zone_batchsize(struct zone *zone)
zone             6043 mm/page_alloc.c 	batch = zone_managed_pages(zone) / 1024;
zone             6148 mm/page_alloc.c static void pageset_set_high_and_batch(struct zone *zone,
zone             6153 mm/page_alloc.c 			(zone_managed_pages(zone) /
zone             6156 mm/page_alloc.c 		pageset_set_batch(pcp, zone_batchsize(zone));
zone             6159 mm/page_alloc.c static void __meminit zone_pageset_init(struct zone *zone, int cpu)
zone             6161 mm/page_alloc.c 	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
zone             6164 mm/page_alloc.c 	pageset_set_high_and_batch(zone, pcp);
zone             6167 mm/page_alloc.c void __meminit setup_zone_pageset(struct zone *zone)
zone             6170 mm/page_alloc.c 	zone->pageset = alloc_percpu(struct per_cpu_pageset);
zone             6172 mm/page_alloc.c 		zone_pageset_init(zone, cpu);
zone             6182 mm/page_alloc.c 	struct zone *zone;
zone             6184 mm/page_alloc.c 	for_each_populated_zone(zone)
zone             6185 mm/page_alloc.c 		setup_zone_pageset(zone);
zone             6192 mm/page_alloc.c static __meminit void zone_pcp_init(struct zone *zone)
zone             6199 mm/page_alloc.c 	zone->pageset = &boot_pageset;
zone             6201 mm/page_alloc.c 	if (populated_zone(zone))
zone             6203 mm/page_alloc.c 			zone->name, zone->present_pages,
zone             6204 mm/page_alloc.c 					 zone_batchsize(zone));
zone             6207 mm/page_alloc.c void __meminit init_currently_empty_zone(struct zone *zone,
zone             6211 mm/page_alloc.c 	struct pglist_data *pgdat = zone->zone_pgdat;
zone             6212 mm/page_alloc.c 	int zone_idx = zone_idx(zone) + 1;
zone             6217 mm/page_alloc.c 	zone->zone_start_pfn = zone_start_pfn;
zone             6222 mm/page_alloc.c 			(unsigned long)zone_idx(zone),
zone             6225 mm/page_alloc.c 	zone_init_free_lists(zone);
zone             6226 mm/page_alloc.c 	zone->initialized = 1;
zone             6514 mm/page_alloc.c 	unsigned int zone;
zone             6517 mm/page_alloc.c 	for (zone = 0; zone < zone_type; zone++)
zone             6518 mm/page_alloc.c 		*zone_start_pfn += zones_size[zone];
zone             6549 mm/page_alloc.c 		struct zone *zone = pgdat->node_zones + i;
zone             6563 mm/page_alloc.c 			zone->zone_start_pfn = zone_start_pfn;
zone             6565 mm/page_alloc.c 			zone->zone_start_pfn = 0;
zone             6566 mm/page_alloc.c 		zone->spanned_pages = size;
zone             6567 mm/page_alloc.c 		zone->present_pages = real_size;
zone             6601 mm/page_alloc.c 				struct zone *zone,
zone             6606 mm/page_alloc.c 	zone->pageblock_flags = NULL;
zone             6608 mm/page_alloc.c 		zone->pageblock_flags =
zone             6611 mm/page_alloc.c 		if (!zone->pageblock_flags)
zone             6613 mm/page_alloc.c 			      usemapsize, zone->name, pgdat->node_id);
zone             6617 mm/page_alloc.c static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
zone             6715 mm/page_alloc.c static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
zone             6718 mm/page_alloc.c 	atomic_long_set(&zone->managed_pages, remaining_pages);
zone             6719 mm/page_alloc.c 	zone_set_nid(zone, nid);
zone             6720 mm/page_alloc.c 	zone->name = zone_names[idx];
zone             6721 mm/page_alloc.c 	zone->zone_pgdat = NODE_DATA(nid);
zone             6722 mm/page_alloc.c 	spin_lock_init(&zone->lock);
zone             6723 mm/page_alloc.c 	zone_seqlock_init(zone);
zone             6724 mm/page_alloc.c 	zone_pcp_init(zone);
zone             6764 mm/page_alloc.c 		struct zone *zone = pgdat->node_zones + j;
zone             6766 mm/page_alloc.c 		unsigned long zone_start_pfn = zone->zone_start_pfn;
zone             6768 mm/page_alloc.c 		size = zone->spanned_pages;
zone             6769 mm/page_alloc.c 		freesize = zone->present_pages;
zone             6808 mm/page_alloc.c 		zone_init_internals(zone, j, nid, freesize);
zone             6814 mm/page_alloc.c 		setup_usemap(pgdat, zone, zone_start_pfn, size);
zone             6815 mm/page_alloc.c 		init_currently_empty_zone(zone, zone_start_pfn, size);
zone             7318 mm/page_alloc.c 		struct zone *zone = &pgdat->node_zones[zone_type];
zone             7319 mm/page_alloc.c 		if (populated_zone(zone)) {
zone             7679 mm/page_alloc.c 			struct zone *zone = pgdat->node_zones + i;
zone             7681 mm/page_alloc.c 			unsigned long managed_pages = zone_managed_pages(zone);
zone             7685 mm/page_alloc.c 				if (zone->lowmem_reserve[j] > max)
zone             7686 mm/page_alloc.c 					max = zone->lowmem_reserve[j];
zone             7690 mm/page_alloc.c 			max += high_wmark_pages(zone);
zone             7716 mm/page_alloc.c 			struct zone *zone = pgdat->node_zones + j;
zone             7717 mm/page_alloc.c 			unsigned long managed_pages = zone_managed_pages(zone);
zone             7719 mm/page_alloc.c 			zone->lowmem_reserve[j] = 0;
zone             7723 mm/page_alloc.c 				struct zone *lower_zone;
zone             7748 mm/page_alloc.c 	struct zone *zone;
zone             7752 mm/page_alloc.c 	for_each_zone(zone) {
zone             7753 mm/page_alloc.c 		if (!is_highmem(zone))
zone             7754 mm/page_alloc.c 			lowmem_pages += zone_managed_pages(zone);
zone             7757 mm/page_alloc.c 	for_each_zone(zone) {
zone             7760 mm/page_alloc.c 		spin_lock_irqsave(&zone->lock, flags);
zone             7761 mm/page_alloc.c 		tmp = (u64)pages_min * zone_managed_pages(zone);
zone             7763 mm/page_alloc.c 		if (is_highmem(zone)) {
zone             7775 mm/page_alloc.c 			min_pages = zone_managed_pages(zone) / 1024;
zone             7777 mm/page_alloc.c 			zone->_watermark[WMARK_MIN] = min_pages;
zone             7783 mm/page_alloc.c 			zone->_watermark[WMARK_MIN] = tmp;
zone             7792 mm/page_alloc.c 			    mult_frac(zone_managed_pages(zone),
zone             7795 mm/page_alloc.c 		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
zone             7796 mm/page_alloc.c 		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
zone             7797 mm/page_alloc.c 		zone->watermark_boost = 0;
zone             7799 mm/page_alloc.c 		spin_unlock_irqrestore(&zone->lock, flags);
zone             7929 mm/page_alloc.c 	struct zone *zone;
zone             7934 mm/page_alloc.c 	for_each_zone(zone)
zone             7935 mm/page_alloc.c 		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
zone             7957 mm/page_alloc.c 	struct zone *zone;
zone             7962 mm/page_alloc.c 	for_each_zone(zone)
zone             7963 mm/page_alloc.c 		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
zone             8007 mm/page_alloc.c 	struct zone *zone;
zone             8030 mm/page_alloc.c 	for_each_populated_zone(zone) {
zone             8034 mm/page_alloc.c 			pageset_set_high_and_batch(zone,
zone             8035 mm/page_alloc.c 					per_cpu_ptr(zone->pageset, cpu));
zone             8191 mm/page_alloc.c bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
zone             8236 mm/page_alloc.c 		if (zone_idx(zone) == ZONE_MOVABLE)
zone             8298 mm/page_alloc.c 	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
zone             8348 mm/page_alloc.c 		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
zone             8393 mm/page_alloc.c 		.zone = page_zone(pfn_to_page(start)),
zone             8532 mm/page_alloc.c void __meminit zone_pcp_update(struct zone *zone)
zone             8537 mm/page_alloc.c 		pageset_set_high_and_batch(zone,
zone             8538 mm/page_alloc.c 				per_cpu_ptr(zone->pageset, cpu));
zone             8542 mm/page_alloc.c void zone_pcp_reset(struct zone *zone)
zone             8550 mm/page_alloc.c 	if (zone->pageset != &boot_pageset) {
zone             8552 mm/page_alloc.c 			pset = per_cpu_ptr(zone->pageset, cpu);
zone             8553 mm/page_alloc.c 			drain_zonestat(zone, pset);
zone             8555 mm/page_alloc.c 		free_percpu(zone->pageset);
zone             8556 mm/page_alloc.c 		zone->pageset = &boot_pageset;
zone             8570 mm/page_alloc.c 	struct zone *zone;
zone             8584 mm/page_alloc.c 	zone = page_zone(pfn_to_page(pfn));
zone             8585 mm/page_alloc.c 	spin_lock_irqsave(&zone->lock, flags);
zone             8612 mm/page_alloc.c 		del_page_from_free_area(page, &zone->free_area[order]);
zone             8617 mm/page_alloc.c 	spin_unlock_irqrestore(&zone->lock, flags);
zone             8625 mm/page_alloc.c 	struct zone *zone = page_zone(page);
zone             8630 mm/page_alloc.c 	spin_lock_irqsave(&zone->lock, flags);
zone             8637 mm/page_alloc.c 	spin_unlock_irqrestore(&zone->lock, flags);
zone             8650 mm/page_alloc.c 	struct zone *zone = page_zone(page);
zone             8656 mm/page_alloc.c 	spin_lock_irqsave(&zone->lock, flags);
zone             8666 mm/page_alloc.c 	spin_unlock_irqrestore(&zone->lock, flags);
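A pattern runs through the mm/page_alloc.c hits above: every free_area manipulation (mark_free_pages, __isolate_free_page, the hit lines around zone_pcp_reset and the offline path) happens under zone->lock with IRQs disabled. A minimal sketch of that discipline, assuming kernel context; the helper name is hypothetical:

	#include <linux/mmzone.h>
	#include <linux/spinlock.h>

	/* Hypothetical helper: read nr_free for one order under zone->lock,
	 * using the same locking the mark_free_pages() hits above show. */
	static unsigned long count_free_at_order(struct zone *zone, unsigned int order)
	{
		unsigned long flags, nr;

		spin_lock_irqsave(&zone->lock, flags);
		nr = zone->free_area[order].nr_free;
		spin_unlock_irqrestore(&zone->lock, flags);

		return nr;
	}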
zone               20 mm/page_isolation.c 	struct zone *zone;
zone               26 mm/page_isolation.c 	zone = page_zone(page);
zone               28 mm/page_isolation.c 	spin_lock_irqsave(&zone->lock, flags);
zone               62 mm/page_isolation.c 	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
zone               77 mm/page_isolation.c 		zone->nr_isolate_pageblock++;
zone               78 mm/page_isolation.c 		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
zone               81 mm/page_isolation.c 		__mod_zone_freepage_state(zone, -nr_pages, mt);
zone               84 mm/page_isolation.c 	spin_unlock_irqrestore(&zone->lock, flags);
zone               86 mm/page_isolation.c 		drain_all_pages(zone);
zone               92 mm/page_isolation.c 	struct zone *zone;
zone               99 mm/page_isolation.c 	zone = page_zone(page);
zone              100 mm/page_isolation.c 	spin_lock_irqsave(&zone->lock, flags);
zone              133 mm/page_isolation.c 		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
zone              134 mm/page_isolation.c 		__mod_zone_freepage_state(zone, nr_pages, migratetype);
zone              137 mm/page_isolation.c 	zone->nr_isolate_pageblock--;
zone              139 mm/page_isolation.c 	spin_unlock_irqrestore(&zone->lock, flags);
zone              293 mm/page_isolation.c 	struct zone *zone;
zone              309 mm/page_isolation.c 	zone = page_zone(page);
zone              310 mm/page_isolation.c 	spin_lock_irqsave(&zone->lock, flags);
zone              313 mm/page_isolation.c 	spin_unlock_irqrestore(&zone->lock, flags);
zone              254 mm/page_owner.c 				       pg_data_t *pgdat, struct zone *zone)
zone              259 mm/page_owner.c 	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
zone              260 mm/page_owner.c 	unsigned long end_pfn = pfn + zone->spanned_pages;
zone              266 mm/page_owner.c 	pfn = zone->zone_start_pfn;
zone              292 mm/page_owner.c 			if (page_zone(page) != zone)
zone              331 mm/page_owner.c 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
zone              546 mm/page_owner.c static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
zone              548 mm/page_owner.c 	unsigned long pfn = zone->zone_start_pfn;
zone              549 mm/page_owner.c 	unsigned long end_pfn = zone_end_pfn(zone);
zone              577 mm/page_owner.c 			if (page_zone(page) != zone)
zone              615 mm/page_owner.c 		pgdat->node_id, zone->name, count);
zone              620 mm/page_owner.c 	struct zone *zone;
zone              621 mm/page_owner.c 	struct zone *node_zones = pgdat->node_zones;
zone              623 mm/page_owner.c 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
zone              624 mm/page_owner.c 		if (!populated_zone(zone))
zone              627 mm/page_owner.c 		init_pages_in_zone(pgdat, zone);
zone              105 mm/shuffle.c   void __meminit __shuffle_zone(struct zone *z)
zone              180 mm/shuffle.c   	struct zone *z;
zone               32 mm/shuffle.h   extern void __shuffle_zone(struct zone *z);
zone               33 mm/shuffle.h   static inline void shuffle_zone(struct zone *z)
zone               51 mm/shuffle.h   static inline void shuffle_zone(struct zone *z)
zone             3107 mm/slab.c      	struct zone *zone;
zone             3126 mm/slab.c      	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
zone             3127 mm/slab.c      		nid = zone_to_nid(zone);
zone             3129 mm/slab.c      		if (cpuset_zone_allowed(zone, flags) &&
zone             1890 mm/slub.c      	struct zone *zone;
zone             1920 mm/slub.c      		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
zone             1923 mm/slub.c      			n = get_node(s, zone_to_nid(zone));
zone             1925 mm/slub.c      			if (n && cpuset_zone_allowed(zone, flags) &&
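The mm/slab.c and mm/slub.c hits above share the zonelist-fallback pattern: walk zones in priority order and filter each through cpuset policy before using its node. A minimal sketch, assuming kernel context (the function name is hypothetical):

	#include <linux/mmzone.h>
	#include <linux/cpuset.h>
	#include <linux/numa.h>

	/* Hypothetical: return the first cpuset-allowed node in the zonelist. */
	static int first_allowed_nid(struct zonelist *zonelist,
				     enum zone_type high_zoneidx, gfp_t flags)
	{
		struct zoneref *z;
		struct zone *zone;

		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
			if (cpuset_zone_allowed(zone, flags))
				return zone_to_nid(zone);
		}
		return NUMA_NO_NODE;
	}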
zone              333 mm/vmscan.c    unsigned long zone_reclaimable_pages(struct zone *zone)
zone              337 mm/vmscan.c    	nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
zone              338 mm/vmscan.c    		zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
zone              340 mm/vmscan.c    		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
zone              341 mm/vmscan.c    			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
zone              364 mm/vmscan.c    		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
zone              367 mm/vmscan.c    		if (!managed_zone(zone))
zone             1536 mm/vmscan.c    unsigned long reclaim_clean_pages_from_list(struct zone *zone,
zone             1557 mm/vmscan.c    	ret = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
zone             1560 mm/vmscan.c    	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -ret);
zone             2367 mm/vmscan.c    			struct zone *zone = &pgdat->node_zones[z];
zone             2368 mm/vmscan.c    			if (!managed_zone(zone))
zone             2371 mm/vmscan.c    			total_high_wmark += high_wmark_pages(zone);
zone             2724 mm/vmscan.c    		struct zone *zone = &pgdat->node_zones[z];
zone             2725 mm/vmscan.c    		if (!managed_zone(zone))
zone             2728 mm/vmscan.c    		switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
zone             2918 mm/vmscan.c    static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
zone             2923 mm/vmscan.c    	suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
zone             2940 mm/vmscan.c    	watermark = high_wmark_pages(zone) + compact_gap(sc->order);
zone             2942 mm/vmscan.c    	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
zone             2956 mm/vmscan.c    	struct zone *zone;
zone             2973 mm/vmscan.c    	for_each_zone_zonelist_nodemask(zone, z, zonelist,
zone             2980 mm/vmscan.c    			if (!cpuset_zone_allowed(zone,
zone             2995 mm/vmscan.c    			    compaction_ready(zone, sc)) {
zone             3006 mm/vmscan.c    			if (zone->zone_pgdat == last_pgdat)
zone             3016 mm/vmscan.c    			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat,
zone             3025 mm/vmscan.c    		if (zone->zone_pgdat == last_pgdat)
zone             3027 mm/vmscan.c    		last_pgdat = zone->zone_pgdat;
zone             3028 mm/vmscan.c    		shrink_node(zone->zone_pgdat, sc);
zone             3075 mm/vmscan.c    	struct zone *zone;
zone             3103 mm/vmscan.c    	for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
zone             3105 mm/vmscan.c    		if (zone->zone_pgdat == last_pgdat)
zone             3107 mm/vmscan.c    		last_pgdat = zone->zone_pgdat;
zone             3108 mm/vmscan.c    		snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
zone             3134 mm/vmscan.c    	struct zone *zone;
zone             3144 mm/vmscan.c    		zone = &pgdat->node_zones[i];
zone             3145 mm/vmscan.c    		if (!managed_zone(zone))
zone             3148 mm/vmscan.c    		if (!zone_reclaimable_pages(zone))
zone             3151 mm/vmscan.c    		pfmemalloc_reserve += min_wmark_pages(zone);
zone             3152 mm/vmscan.c    		free_pages += zone_page_state(zone, NR_FREE_PAGES);
zone             3184 mm/vmscan.c    	struct zone *zone;
zone             3218 mm/vmscan.c    	for_each_zone_zonelist_nodemask(zone, z, zonelist,
zone             3220 mm/vmscan.c    		if (zone_idx(zone) > ZONE_NORMAL)
zone             3224 mm/vmscan.c    		pgdat = zone->zone_pgdat;
zone             3253 mm/vmscan.c    	wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
zone             3421 mm/vmscan.c    	struct zone *zone;
zone             3431 mm/vmscan.c    		zone = pgdat->node_zones + i;
zone             3432 mm/vmscan.c    		if (!managed_zone(zone))
zone             3435 mm/vmscan.c    		if (zone->watermark_boost)
zone             3450 mm/vmscan.c    	struct zone *zone;
zone             3457 mm/vmscan.c    		zone = pgdat->node_zones + i;
zone             3459 mm/vmscan.c    		if (!managed_zone(zone))
zone             3462 mm/vmscan.c    		mark = high_wmark_pages(zone);
zone             3463 mm/vmscan.c    		if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
zone             3533 mm/vmscan.c    	struct zone *zone;
zone             3539 mm/vmscan.c    		zone = pgdat->node_zones + z;
zone             3540 mm/vmscan.c    		if (!managed_zone(zone))
zone             3543 mm/vmscan.c    		sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
zone             3587 mm/vmscan.c    	struct zone *zone;
zone             3607 mm/vmscan.c    		zone = pgdat->node_zones + i;
zone             3608 mm/vmscan.c    		if (!managed_zone(zone))
zone             3611 mm/vmscan.c    		nr_boost_reclaim += zone->watermark_boost;
zone             3612 mm/vmscan.c    		zone_boosts[i] = zone->watermark_boost;
zone             3638 mm/vmscan.c    				zone = pgdat->node_zones + i;
zone             3639 mm/vmscan.c    				if (!managed_zone(zone))
zone             3759 mm/vmscan.c    			zone = pgdat->node_zones + i;
zone             3760 mm/vmscan.c    			spin_lock_irqsave(&zone->lock, flags);
zone             3761 mm/vmscan.c    			zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
zone             3762 mm/vmscan.c    			spin_unlock_irqrestore(&zone->lock, flags);
zone             3976 mm/vmscan.c    void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
zone             3981 mm/vmscan.c    	if (!managed_zone(zone))
zone             3984 mm/vmscan.c    	if (!cpuset_zone_allowed(zone, gfp_flags))
zone             3986 mm/vmscan.c    	pgdat = zone->zone_pgdat;
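In the mm/vmscan.c hits above, per-node reclaim repeatedly walks pgdat->node_zones, skips zones that fail managed_zone(), and compares free pages against the high watermark (the pgdat_balanced and allow_direct_reclaim hits). A minimal sketch of that test, assuming kernel context and mirroring the any-zone-balanced semantics of those hits; the function name is hypothetical:

	#include <linux/mmzone.h>

	/* Hypothetical: true if any eligible zone clears its high watermark. */
	static bool any_zone_balanced(pg_data_t *pgdat, int order, int classzone_idx)
	{
		int i;

		for (i = 0; i <= classzone_idx; i++) {
			struct zone *zone = pgdat->node_zones + i;

			if (!managed_zone(zone))
				continue;

			if (zone_watermark_ok_safe(zone, order,
						   high_wmark_pages(zone),
						   classzone_idx))
				return true;
		}
		return false;
	}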
zone               40 mm/vmstat.c    static void zero_zone_numa_counters(struct zone *zone)
zone               45 mm/vmstat.c    		atomic_long_set(&zone->vm_numa_stat[item], 0);
zone               47 mm/vmstat.c    			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
zone               55 mm/vmstat.c    	struct zone *zone;
zone               57 mm/vmstat.c    	for_each_populated_zone(zone)
zone               58 mm/vmstat.c    		zero_zone_numa_counters(zone);
zone              172 mm/vmstat.c    int calculate_pressure_threshold(struct zone *zone)
zone              185 mm/vmstat.c    	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
zone              196 mm/vmstat.c    int calculate_normal_threshold(struct zone *zone)
zone              231 mm/vmstat.c    	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
zone              249 mm/vmstat.c    	struct zone *zone;
zone              260 mm/vmstat.c    	for_each_populated_zone(zone) {
zone              261 mm/vmstat.c    		struct pglist_data *pgdat = zone->zone_pgdat;
zone              264 mm/vmstat.c    		threshold = calculate_normal_threshold(zone);
zone              269 mm/vmstat.c    			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
zone              283 mm/vmstat.c    		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
zone              286 mm/vmstat.c    			zone->percpu_drift_mark = high_wmark_pages(zone) +
zone              292 mm/vmstat.c    				int (*calculate_pressure)(struct zone *))
zone              294 mm/vmstat.c    	struct zone *zone;
zone              300 mm/vmstat.c    		zone = &pgdat->node_zones[i];
zone              301 mm/vmstat.c    		if (!zone->percpu_drift_mark)
zone              304 mm/vmstat.c    		threshold = (*calculate_pressure)(zone);
zone              306 mm/vmstat.c    			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
zone              316 mm/vmstat.c    void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
zone              319 mm/vmstat.c    	struct per_cpu_pageset __percpu *pcp = zone->pageset;
zone              329 mm/vmstat.c    		zone_page_state_add(x, zone, item);
zone              379 mm/vmstat.c    void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
zone              381 mm/vmstat.c    	struct per_cpu_pageset __percpu *pcp = zone->pageset;
zone              390 mm/vmstat.c    		zone_page_state_add(v + overstep, zone, item);
zone              423 mm/vmstat.c    void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
zone              425 mm/vmstat.c    	struct per_cpu_pageset __percpu *pcp = zone->pageset;
zone              434 mm/vmstat.c    		zone_page_state_add(v - overstep, zone, item);
zone              480 mm/vmstat.c    static inline void mod_zone_state(struct zone *zone,
zone              483 mm/vmstat.c    	struct per_cpu_pageset __percpu *pcp = zone->pageset;
zone              515 mm/vmstat.c    		zone_page_state_add(z, zone, item);
zone              518 mm/vmstat.c    void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
zone              521 mm/vmstat.c    	mod_zone_state(zone, item, delta, 0);
zone              602 mm/vmstat.c    void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
zone              608 mm/vmstat.c    	__mod_zone_page_state(zone, item, delta);
zone              616 mm/vmstat.c    	struct zone *zone;
zone              618 mm/vmstat.c    	zone = page_zone(page);
zone              620 mm/vmstat.c    	__inc_zone_state(zone, item);
zone              748 mm/vmstat.c    	struct zone *zone;
zone              757 mm/vmstat.c    	for_each_populated_zone(zone) {
zone              758 mm/vmstat.c    		struct per_cpu_pageset __percpu *p = zone->pageset;
zone              766 mm/vmstat.c    				atomic_long_add(v, &zone->vm_stat[i]);
zone              781 mm/vmstat.c    				atomic_long_add(v, &zone->vm_numa_stat[i]);
zone              803 mm/vmstat.c    			if (zone_to_nid(zone) == numa_node_id()) {
zone              812 mm/vmstat.c    				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
zone              850 mm/vmstat.c    	struct zone *zone;
zone              858 mm/vmstat.c    	for_each_populated_zone(zone) {
zone              861 mm/vmstat.c    		p = per_cpu_ptr(zone->pageset, cpu);
zone              869 mm/vmstat.c    				atomic_long_add(v, &zone->vm_stat[i]);
zone              880 mm/vmstat.c    				atomic_long_add(v, &zone->vm_numa_stat[i]);
zone              913 mm/vmstat.c    void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
zone              921 mm/vmstat.c    			atomic_long_add(v, &zone->vm_stat[i]);
zone              931 mm/vmstat.c    			atomic_long_add(v, &zone->vm_numa_stat[i]);
zone              939 mm/vmstat.c    void __inc_numa_state(struct zone *zone,
zone              942 mm/vmstat.c    	struct per_cpu_pageset __percpu *pcp = zone->pageset;
zone              949 mm/vmstat.c    		zone_numa_state_add(v, zone, item);
zone              962 mm/vmstat.c    	struct zone *zones = NODE_DATA(node)->node_zones;
zone              979 mm/vmstat.c    	struct zone *zones = NODE_DATA(node)->node_zones;
zone             1020 mm/vmstat.c    static void fill_contig_page_info(struct zone *zone,
zone             1034 mm/vmstat.c    		blocks = zone->free_area[order].nr_free;
zone             1078 mm/vmstat.c    int fragmentation_index(struct zone *zone, unsigned int order)
zone             1082 mm/vmstat.c    	fill_contig_page_info(zone, order, &info);
zone             1331 mm/vmstat.c    		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
zone             1333 mm/vmstat.c    	struct zone *zone;
zone             1334 mm/vmstat.c    	struct zone *node_zones = pgdat->node_zones;
zone             1337 mm/vmstat.c    	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
zone             1338 mm/vmstat.c    		if (assert_populated && !populated_zone(zone))
zone             1342 mm/vmstat.c    			spin_lock_irqsave(&zone->lock, flags);
zone             1343 mm/vmstat.c    		print(m, pgdat, zone);
zone             1345 mm/vmstat.c    			spin_unlock_irqrestore(&zone->lock, flags);
zone             1352 mm/vmstat.c    						struct zone *zone)
zone             1356 mm/vmstat.c    	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
zone             1358 mm/vmstat.c    		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
zone             1373 mm/vmstat.c    					pg_data_t *pgdat, struct zone *zone)
zone             1380 mm/vmstat.c    					zone->name,
zone             1388 mm/vmstat.c    			area = &(zone->free_area[order]);
zone             1406 mm/vmstat.c    			spin_unlock_irq(&zone->lock);
zone             1408 mm/vmstat.c    			spin_lock_irq(&zone->lock);
zone             1432 mm/vmstat.c    					pg_data_t *pgdat, struct zone *zone)
zone             1436 mm/vmstat.c    	unsigned long start_pfn = zone->zone_start_pfn;
zone             1437 mm/vmstat.c    	unsigned long end_pfn = zone_end_pfn(zone);
zone             1448 mm/vmstat.c    		if (!memmap_valid_within(pfn, page, zone))
zone             1451 mm/vmstat.c    		if (page_zone(page) != zone)
zone             1461 mm/vmstat.c    	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
zone             1545 mm/vmstat.c    static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
zone             1550 mm/vmstat.c    		struct zone *compare = &pgdat->node_zones[zid];
zone             1553 mm/vmstat.c    			return zone == compare;
zone             1560 mm/vmstat.c    							struct zone *zone)
zone             1563 mm/vmstat.c    	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
zone             1564 mm/vmstat.c    	if (is_zone_first_populated(pgdat, zone)) {
zone             1581 mm/vmstat.c    		   zone_page_state(zone, NR_FREE_PAGES),
zone             1582 mm/vmstat.c    		   min_wmark_pages(zone),
zone             1583 mm/vmstat.c    		   low_wmark_pages(zone),
zone             1584 mm/vmstat.c    		   high_wmark_pages(zone),
zone             1585 mm/vmstat.c    		   zone->spanned_pages,
zone             1586 mm/vmstat.c    		   zone->present_pages,
zone             1587 mm/vmstat.c    		   zone_managed_pages(zone));
zone             1591 mm/vmstat.c    		   zone->lowmem_reserve[0]);
zone             1592 mm/vmstat.c    	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
zone             1593 mm/vmstat.c    		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
zone             1597 mm/vmstat.c    	if (!populated_zone(zone)) {
zone             1604 mm/vmstat.c    				zone_page_state(zone, i));
zone             1610 mm/vmstat.c    				zone_numa_state_snapshot(zone, i));
zone             1617 mm/vmstat.c    		pageset = per_cpu_ptr(zone->pageset, i);
zone             1636 mm/vmstat.c    		   zone->zone_start_pfn);
zone             1833 mm/vmstat.c    	struct zone *zone;
zone             1835 mm/vmstat.c    	for_each_populated_zone(zone) {
zone             1836 mm/vmstat.c    		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
zone             2023 mm/vmstat.c    					pg_data_t *pgdat, struct zone *zone)
zone             2031 mm/vmstat.c    				zone->name);
zone             2033 mm/vmstat.c    		fill_contig_page_info(zone, order, &info);
zone             2083 mm/vmstat.c    					pg_data_t *pgdat, struct zone *zone)
zone             2093 mm/vmstat.c    				zone->name);
zone             2095 mm/vmstat.c    		fill_contig_page_info(zone, order, &info);
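The mm/vmstat.c hits above implement zone counters as per-CPU deltas that fold into zone->vm_stat once they cross stat_threshold; __mod_zone_page_state is the IRQs-off fast path and mod_zone_page_state the IRQ-safe wrapper. A minimal caller-side sketch, assuming kernel context and a hypothetical wrapper name:

	#include <linux/vmstat.h>

	/* Hypothetical wrapper: account nr_pages against a zone counter.
	 * mod_zone_page_state() is IRQ-safe; use __mod_zone_page_state()
	 * only where interrupts are already disabled. */
	static void account_zone_pages(struct zone *zone, long nr_pages)
	{
		mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	}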
zone              281 net/core/flow_dissector.c 	key->ct_zone = ct->zone.id;
zone               44 net/netfilter/nf_conncount.c 	struct nf_conntrack_zone	zone;
zone              105 net/netfilter/nf_conncount.c 	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
zone              128 net/netfilter/nf_conncount.c 			      const struct nf_conntrack_zone *zone)
zone              145 net/netfilter/nf_conncount.c 				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
zone              146 net/netfilter/nf_conncount.c 				    nf_ct_zone_id(zone, zone->dir))
zone              157 net/netfilter/nf_conncount.c 		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
zone              188 net/netfilter/nf_conncount.c 	conn->zone = *zone;
zone              199 net/netfilter/nf_conncount.c 		     const struct nf_conntrack_zone *zone)
zone              205 net/netfilter/nf_conncount.c 	ret = __nf_conncount_add(net, list, tuple, zone);
zone              306 net/netfilter/nf_conncount.c 	    const struct nf_conntrack_zone *zone)
zone              333 net/netfilter/nf_conncount.c 			ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
zone              369 net/netfilter/nf_conncount.c 	conn->zone = *zone;
zone              389 net/netfilter/nf_conncount.c 	   const struct nf_conntrack_zone *zone)
zone              429 net/netfilter/nf_conncount.c 			ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
zone              441 net/netfilter/nf_conncount.c 	return insert_tree(net, data, root, hash, key, tuple, zone);
zone              510 net/netfilter/nf_conncount.c 				const struct nf_conntrack_zone *zone)
zone              512 net/netfilter/nf_conncount.c 	return count_tree(net, data, key, tuple, zone);
zone              542 net/netfilter/nf_conntrack_core.c 				 const struct nf_conntrack_zone *zone,
zone              566 net/netfilter/nf_conntrack_core.c 	nf_ct_zone_add(tmpl, zone);
zone              686 net/netfilter/nf_conntrack_core.c 		const struct nf_conntrack_zone *zone,
zone              695 net/netfilter/nf_conntrack_core.c 	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
zone              730 net/netfilter/nf_conntrack_core.c ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
zone              751 net/netfilter/nf_conntrack_core.c 		if (nf_ct_key_equal(h, tuple, zone, net))
zone              769 net/netfilter/nf_conntrack_core.c __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
zone              777 net/netfilter/nf_conntrack_core.c 	h = ____nf_conntrack_find(net, zone, tuple, hash);
zone              784 net/netfilter/nf_conntrack_core.c 			if (likely(nf_ct_key_equal(h, tuple, zone, net)))
zone              800 net/netfilter/nf_conntrack_core.c nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
zone              803 net/netfilter/nf_conntrack_core.c 	return __nf_conntrack_find_get(net, zone, tuple,
zone              821 net/netfilter/nf_conntrack_core.c 	const struct nf_conntrack_zone *zone;
zone              828 net/netfilter/nf_conntrack_core.c 	zone = nf_ct_zone(ct);
zone              842 net/netfilter/nf_conntrack_core.c 				    zone, net))
zone              847 net/netfilter/nf_conntrack_core.c 				    zone, net))
zone              930 net/netfilter/nf_conntrack_core.c 	const struct nf_conntrack_zone *zone;
zone              952 net/netfilter/nf_conntrack_core.c 	zone = nf_ct_zone(ct);
zone             1000 net/netfilter/nf_conntrack_core.c 				    zone, net))
zone             1005 net/netfilter/nf_conntrack_core.c 				    zone, net))
zone             1055 net/netfilter/nf_conntrack_core.c 	const struct nf_conntrack_zone *zone;
zone             1062 net/netfilter/nf_conntrack_core.c 	zone = nf_ct_zone(ignored_conntrack);
zone             1080 net/netfilter/nf_conntrack_core.c 		if (nf_ct_key_equal(h, tuple, zone, net)) {
zone             1346 net/netfilter/nf_conntrack_core.c 		     const struct nf_conntrack_zone *zone,
zone             1388 net/netfilter/nf_conntrack_core.c 	nf_ct_zone_add(ct, zone);
zone             1401 net/netfilter/nf_conntrack_core.c 				   const struct nf_conntrack_zone *zone,
zone             1406 net/netfilter/nf_conntrack_core.c 	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
zone             1441 net/netfilter/nf_conntrack_core.c 	const struct nf_conntrack_zone *zone;
zone             1450 net/netfilter/nf_conntrack_core.c 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
zone             1451 net/netfilter/nf_conntrack_core.c 	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
zone             1479 net/netfilter/nf_conntrack_core.c 		exp = nf_ct_find_expectation(net, zone, tuple);
zone             1529 net/netfilter/nf_conntrack_core.c 	const struct nf_conntrack_zone *zone;
zone             1545 net/netfilter/nf_conntrack_core.c 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
zone             1547 net/netfilter/nf_conntrack_core.c 	h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
zone               97 net/netfilter/nf_conntrack_expect.c 		const struct nf_conntrack_zone *zone,
zone              102 net/netfilter/nf_conntrack_expect.c 	       nf_ct_zone_equal_any(i->master, zone);
zone              118 net/netfilter/nf_conntrack_expect.c 		    const struct nf_conntrack_zone *zone,
zone              129 net/netfilter/nf_conntrack_expect.c 		if (nf_ct_exp_equal(tuple, i, zone, net))
zone              139 net/netfilter/nf_conntrack_expect.c 		      const struct nf_conntrack_zone *zone,
zone              145 net/netfilter/nf_conntrack_expect.c 	i = __nf_ct_expect_find(net, zone, tuple);
zone              158 net/netfilter/nf_conntrack_expect.c 		       const struct nf_conntrack_zone *zone,
zone              170 net/netfilter/nf_conntrack_expect.c 		    nf_ct_exp_equal(tuple, i, zone, net)) {
zone              146 net/netfilter/nf_conntrack_netlink.c 				  const struct nf_conntrack_zone *zone, int dir)
zone              148 net/netfilter/nf_conntrack_netlink.c 	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
zone              150 net/netfilter/nf_conntrack_netlink.c 	if (nla_put_be16(skb, attrtype, htons(zone->id)))
zone              513 net/netfilter/nf_conntrack_netlink.c 	const struct nf_conntrack_zone *zone;
zone              529 net/netfilter/nf_conntrack_netlink.c 	zone = nf_ct_zone(ct);
zone              536 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
zone              546 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
zone              551 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
zone              682 net/netfilter/nf_conntrack_netlink.c 	const struct nf_conntrack_zone *zone;
zone              724 net/netfilter/nf_conntrack_netlink.c 	zone = nf_ct_zone(ct);
zone              731 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
zone              741 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
zone              746 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
zone             1085 net/netfilter/nf_conntrack_netlink.c 		     struct nf_conntrack_zone *zone)
zone             1087 net/netfilter/nf_conntrack_netlink.c 	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
zone             1091 net/netfilter/nf_conntrack_netlink.c 		zone->id = ntohs(nla_get_be16(attr));
zone             1101 net/netfilter/nf_conntrack_netlink.c 			   struct nf_conntrack_zone *zone)
zone             1105 net/netfilter/nf_conntrack_netlink.c 	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
zone             1108 net/netfilter/nf_conntrack_netlink.c 	ret = ctnetlink_parse_zone(attr, zone);
zone             1113 net/netfilter/nf_conntrack_netlink.c 		zone->dir = NF_CT_ZONE_DIR_REPL;
zone             1115 net/netfilter/nf_conntrack_netlink.c 		zone->dir = NF_CT_ZONE_DIR_ORIG;
zone             1129 net/netfilter/nf_conntrack_netlink.c 		      u_int8_t l3num, struct nf_conntrack_zone *zone)
zone             1158 net/netfilter/nf_conntrack_netlink.c 		if (!zone)
zone             1162 net/netfilter/nf_conntrack_netlink.c 						 type, zone);
zone             1262 net/netfilter/nf_conntrack_netlink.c 	struct nf_conntrack_zone zone;
zone             1265 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
zone             1271 net/netfilter/nf_conntrack_netlink.c 					    nfmsg->nfgen_family, &zone);
zone             1274 net/netfilter/nf_conntrack_netlink.c 					    nfmsg->nfgen_family, &zone);
zone             1286 net/netfilter/nf_conntrack_netlink.c 	h = nf_conntrack_find_get(net, &zone, &tuple);
zone             1324 net/netfilter/nf_conntrack_netlink.c 	struct nf_conntrack_zone zone;
zone             1338 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
zone             1344 net/netfilter/nf_conntrack_netlink.c 					    u3, &zone);
zone             1347 net/netfilter/nf_conntrack_netlink.c 					    u3, &zone);
zone             1354 net/netfilter/nf_conntrack_netlink.c 	h = nf_conntrack_find_get(net, &zone, &tuple);
zone             1937 net/netfilter/nf_conntrack_netlink.c 			   const struct nf_conntrack_zone *zone,
zone             1949 net/netfilter/nf_conntrack_netlink.c 	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
zone             2069 net/netfilter/nf_conntrack_netlink.c 		master_h = nf_conntrack_find_get(net, zone, &master);
zone             2108 net/netfilter/nf_conntrack_netlink.c 	struct nf_conntrack_zone zone;
zone             2111 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
zone             2117 net/netfilter/nf_conntrack_netlink.c 					    u3, &zone);
zone             2124 net/netfilter/nf_conntrack_netlink.c 					    u3, &zone);
zone             2130 net/netfilter/nf_conntrack_netlink.c 		h = nf_conntrack_find_get(net, &zone, &otuple);
zone             2132 net/netfilter/nf_conntrack_netlink.c 		h = nf_conntrack_find_get(net, &zone, &rtuple);
zone             2144 net/netfilter/nf_conntrack_netlink.c 			ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
zone             2406 net/netfilter/nf_conntrack_netlink.c 	const struct nf_conntrack_zone *zone;
zone             2409 net/netfilter/nf_conntrack_netlink.c 	zone = nf_ct_zone(ct);
zone             2416 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
zone             2426 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
zone             2431 net/netfilter/nf_conntrack_netlink.c 	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
zone             2997 net/netfilter/nf_conntrack_netlink.c 	struct nf_conntrack_zone zone;
zone             3008 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
zone             3012 net/netfilter/nf_conntrack_netlink.c 	h = nf_conntrack_find_get(net, &zone, &tuple);
zone             3041 net/netfilter/nf_conntrack_netlink.c 	struct nf_conntrack_zone zone;
zone             3057 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
zone             3073 net/netfilter/nf_conntrack_netlink.c 	exp = nf_ct_expect_find_get(net, &zone, &tuple);
zone             3138 net/netfilter/nf_conntrack_netlink.c 	struct nf_conntrack_zone zone;
zone             3143 net/netfilter/nf_conntrack_netlink.c 		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
zone             3153 net/netfilter/nf_conntrack_netlink.c 		exp = nf_ct_expect_find_get(net, &zone, &tuple);
zone             3309 net/netfilter/nf_conntrack_netlink.c 			const struct nf_conntrack_zone *zone,
zone             3335 net/netfilter/nf_conntrack_netlink.c 	h = nf_conntrack_find_get(net, zone, &master_tuple);
zone             3391 net/netfilter/nf_conntrack_netlink.c 	struct nf_conntrack_zone zone;
zone             3399 net/netfilter/nf_conntrack_netlink.c 	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
zone             3409 net/netfilter/nf_conntrack_netlink.c 	exp = __nf_ct_expect_find(net, &zone, &tuple);
zone             3414 net/netfilter/nf_conntrack_netlink.c 			err = ctnetlink_create_expect(net, &zone, cda, u3,
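
A note on the ctnetlink entries above: the zone id travels over netlink in network byte order, and parsing starts from the default zone before any attribute overrides it. The kernel-context sketch below mirrors the ctnetlink_parse_zone lines visible in the listing; treat it as an illustration of the pattern, not the verbatim function.

    /*
     * Sketch of the ctnetlink zone-attribute parse shown above:
     * initialize to the default zone, then override the id from the
     * be16 attribute if one was supplied. Kernel context assumed.
     */
    #include <net/netfilter/nf_conntrack_zones.h>
    #include <net/netlink.h>

    static void parse_zone_attr(const struct nlattr *attr,
                                struct nf_conntrack_zone *zone)
    {
        /* Default first: id 0, both directions, no flags. */
        nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
                        NF_CT_DEFAULT_ZONE_DIR, 0);
        if (attr)
            zone->id = ntohs(nla_get_be16(attr));
    }

The dump side (ctnetlink_dump_zone_id above) is the mirror image: it emits nothing for the default zone, so the attribute's absence and zone 0 are interchangeable on the wire.
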
zone              153 net/netfilter/nf_conntrack_pptp.c 	const struct nf_conntrack_zone *zone;
zone              160 net/netfilter/nf_conntrack_pptp.c 	zone = nf_ct_zone(ct);
zone              161 net/netfilter/nf_conntrack_pptp.c 	h = nf_conntrack_find_get(net, zone, t);
zone              171 net/netfilter/nf_conntrack_pptp.c 		exp = nf_ct_expect_find_get(net, zone, t);
zone              111 net/netfilter/nf_conntrack_proto_icmp.c 	const struct nf_conntrack_zone *zone;
zone              119 net/netfilter/nf_conntrack_proto_icmp.c 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
zone              131 net/netfilter/nf_conntrack_proto_icmp.c 	h = nf_conntrack_find_get(state->net, zone, &innertuple);
zone              197 net/netfilter/nf_conntrack_standalone.c 	const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
zone              199 net/netfilter/nf_conntrack_standalone.c 	if (zone->dir != dir)
zone              201 net/netfilter/nf_conntrack_standalone.c 	switch (zone->dir) {
zone              203 net/netfilter/nf_conntrack_standalone.c 		seq_printf(s, "zone=%u ", zone->id);
zone              206 net/netfilter/nf_conntrack_standalone.c 		seq_printf(s, "zone-orig=%u ", zone->id);
zone              209 net/netfilter/nf_conntrack_standalone.c 		seq_printf(s, "zone-reply=%u ", zone->id);
zone              297 net/netfilter/nf_nat_core.c 		     const struct nf_conntrack_zone *zone,
zone              308 net/netfilter/nf_nat_core.c 		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
zone              328 net/netfilter/nf_nat_core.c find_best_ips_proto(const struct nf_conntrack_zone *zone,
zone              369 net/netfilter/nf_nat_core.c 			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
zone              530 net/netfilter/nf_nat_core.c 	const struct nf_conntrack_zone *zone;
zone              533 net/netfilter/nf_nat_core.c 	zone = nf_ct_zone(ct);
zone              551 net/netfilter/nf_nat_core.c 		} else if (find_appropriate_src(net, zone,
zone              561 net/netfilter/nf_nat_core.c 	find_best_ips_proto(zone, tuple, range, ct, maniptype);
zone               27 net/netfilter/nft_connlimit.c 	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
zone               39 net/netfilter/nft_connlimit.c 		zone = nf_ct_zone(ct);
zone               46 net/netfilter/nft_connlimit.c 	if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
zone              167 net/netfilter/nft_ct.c 		const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
zone              171 net/netfilter/nft_ct.c 			zoneid = nf_ct_zone_id(zone, priv->dir);
zone              173 net/netfilter/nft_ct.c 			zoneid = zone->id;
zone              237 net/netfilter/nft_ct.c 	struct nf_conntrack_zone zone = { .dir = NF_CT_DEFAULT_ZONE_DIR };
zone              248 net/netfilter/nft_ct.c 	zone.id = value;
zone              252 net/netfilter/nft_ct.c 		zone.dir = NF_CT_ZONE_DIR_ORIG;
zone              255 net/netfilter/nft_ct.c 		zone.dir = NF_CT_ZONE_DIR_REPL;
zone              264 net/netfilter/nft_ct.c 		nf_ct_zone_add(ct, &zone);
zone              267 net/netfilter/nft_ct.c 		ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC);
zone              365 net/netfilter/nft_ct.c 	struct nf_conntrack_zone zone = { .id = 0 };
zone              373 net/netfilter/nft_ct.c 		tmp = nf_ct_tmpl_alloc(&init_net, &zone, GFP_KERNEL);
zone              142 net/netfilter/xt_CT.c 	struct nf_conntrack_zone zone;
zone              153 net/netfilter/xt_CT.c 	if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
zone              163 net/netfilter/xt_CT.c 	memset(&zone, 0, sizeof(zone));
zone              164 net/netfilter/xt_CT.c 	zone.id = info->zone;
zone              165 net/netfilter/xt_CT.c 	zone.dir = xt_ct_flags_to_dir(info);
zone              167 net/netfilter/xt_CT.c 		zone.flags |= NF_CT_FLAG_MARK;
zone              169 net/netfilter/xt_CT.c 	ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
zone              227 net/netfilter/xt_CT.c 		.zone		= info->zone,
zone              290 net/netfilter/xt_CT.c 		.zone		= info->zone,
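
The nft_ct and xt_CT entries above share one pattern: build an nf_conntrack_zone by hand (id, direction, optional flags), then allocate a conntrack template that carries it. A minimal kernel-context sketch of that pattern, assuming the usual nf_ct_tmpl_alloc() semantics; alloc_zone_tmpl() is a hypothetical helper, not a kernel symbol.

    /*
     * Sketch only: zone -> conntrack template, as nft_ct and xt_CT
     * do above. Error handling abbreviated; kernel context assumed.
     */
    #include <linux/string.h>
    #include <net/netfilter/nf_conntrack.h>
    #include <net/netfilter/nf_conntrack_zones.h>

    static struct nf_conn *alloc_zone_tmpl(struct net *net, u16 id, u8 dir)
    {
        struct nf_conntrack_zone zone;
        struct nf_conn *tmpl;

        memset(&zone, 0, sizeof(zone));
        zone.id  = id;
        zone.dir = dir;    /* NF_CT_ZONE_DIR_ORIG, _REPL, or both */

        tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
        if (!tmpl)
            return NULL;
        /* The template's zone is later picked up via nf_ct_zone_tmpl(). */
        return tmpl;
    }
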
zone               36 net/netfilter/xt_connlimit.c 	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
zone               45 net/netfilter/xt_connlimit.c 		zone = nf_ct_zone(ct);
zone               62 net/netfilter/xt_connlimit.c 		key[4] = zone->id;
zone               69 net/netfilter/xt_connlimit.c 		key[1] = zone->id;
zone               73 net/netfilter/xt_connlimit.c 					 zone);
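
Running through nearly every netfilter entry above is the same lookup triple: (net, zone, tuple). A minimal sketch of that flow, assuming the standard conntrack lookup API; lookup_in_zone() is illustrative, not a kernel symbol.

    /*
     * Sketch: how the zone argument threads through a conntrack
     * lookup, following the call pattern in the entries above.
     */
    #include <net/netfilter/nf_conntrack.h>
    #include <net/netfilter/nf_conntrack_zones.h>

    static struct nf_conn *
    lookup_in_zone(struct net *net, u16 zone_id,
                   const struct nf_conntrack_tuple *tuple)
    {
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_zone zone;

        /* Applies to both directions, no flags. */
        nf_ct_zone_init(&zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);

        /* Takes a reference on the entry if found. */
        h = nf_conntrack_find_get(net, &zone, tuple);
        if (!h)
            return NULL;
        return nf_ct_tuplehash_to_ctrack(h);
    }
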
zone               59 net/openvswitch/conntrack.c 	struct nf_conntrack_zone zone;
zone               86 net/openvswitch/conntrack.c 	u16 zone;
zone              190 net/openvswitch/conntrack.c 				const struct nf_conntrack_zone *zone,
zone              194 net/openvswitch/conntrack.c 	key->ct_zone = zone->id;
zone              238 net/openvswitch/conntrack.c 	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
zone              262 net/openvswitch/conntrack.c 		zone = nf_ct_zone(ct);
zone              266 net/openvswitch/conntrack.c 			zone = &info->zone;
zone              268 net/openvswitch/conntrack.c 	__ovs_ct_update_key(key, state, zone, ct);
zone              494 net/openvswitch/conntrack.c 			    u16 zone, struct sk_buff *skb)
zone              500 net/openvswitch/conntrack.c 		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
zone              510 net/openvswitch/conntrack.c 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
zone              542 net/openvswitch/conntrack.c ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
zone              551 net/openvswitch/conntrack.c 	exp = __nf_ct_expect_find(net, zone, &tuple);
zone              568 net/openvswitch/conntrack.c 		h = nf_conntrack_find_get(net, zone, &tuple);
zone              606 net/openvswitch/conntrack.c ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
zone              631 net/openvswitch/conntrack.c 	h = nf_conntrack_find_get(net, zone, &tuple);
zone              665 net/openvswitch/conntrack.c 		       (key->ct_zone == info->zone.id);
zone              668 net/openvswitch/conntrack.c 		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
zone             1048 net/openvswitch/conntrack.c 	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
zone             1056 net/openvswitch/conntrack.c 		__ovs_ct_update_key(key, state, &info->zone, exp->master);
zone             1086 net/openvswitch/conntrack.c 	const struct ovs_ct_limit_info *info, u16 zone)
zone             1088 net/openvswitch/conntrack.c 	return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
zone             1098 net/openvswitch/conntrack.c 	head = ct_limit_hash_bucket(info, new_ct_limit->zone);
zone             1100 net/openvswitch/conntrack.c 		if (ct_limit->zone == new_ct_limit->zone) {
zone             1112 net/openvswitch/conntrack.c static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
zone             1118 net/openvswitch/conntrack.c 	head = ct_limit_hash_bucket(info, zone);
zone             1120 net/openvswitch/conntrack.c 		if (ct_limit->zone == zone) {
zone             1129 net/openvswitch/conntrack.c static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
zone             1134 net/openvswitch/conntrack.c 	head = ct_limit_hash_bucket(info, zone);
zone             1136 net/openvswitch/conntrack.c 		if (ct_limit->zone == zone)
zone             1152 net/openvswitch/conntrack.c 	conncount_key = info->zone.id;
zone             1154 net/openvswitch/conntrack.c 	per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
zone             1159 net/openvswitch/conntrack.c 					 &conncount_key, tuple, &info->zone);
zone             1193 net/openvswitch/conntrack.c 					info->zone.id);
zone             1294 net/openvswitch/conntrack.c 		err = handle_fragments(net, key, info->zone.id, skb);
zone             1542 net/openvswitch/conntrack.c 			info->zone.id = nla_get_u16(a);
zone             1666 net/openvswitch/conntrack.c 	nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
zone             1674 net/openvswitch/conntrack.c 	ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
zone             1792 net/openvswitch/conntrack.c 	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
zone             1939 net/openvswitch/conntrack.c 	u16 zone;
zone             1951 net/openvswitch/conntrack.c 				zone_limit->zone_id, &zone))) {
zone             1960 net/openvswitch/conntrack.c 			ct_limit->zone = zone;
zone             1983 net/openvswitch/conntrack.c 	u16 zone;
zone             1995 net/openvswitch/conntrack.c 				zone_limit->zone_id, &zone))) {
zone             1999 net/openvswitch/conntrack.c 			ct_limit_del(info, zone);
zone             2054 net/openvswitch/conntrack.c 	u16 zone;
zone             2066 net/openvswitch/conntrack.c 							&zone))) {
zone             2070 net/openvswitch/conntrack.c 			limit = ct_limit_get(info, zone);
zone             2074 net/openvswitch/conntrack.c 				net, info->data, zone, limit, reply);
zone             2106 net/openvswitch/conntrack.c 				ct_limit->zone, ct_limit->limit, reply);
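
The OVS ct-limit entries above index their hash table directly by zone id with a power-of-two mask. A standalone user-space rendering of that indexing; the bucket count here is an assumption chosen only to be a power of two, not necessarily the kernel's value.

    /*
     * Sketch of the per-zone limit bucketing shown above: the 16-bit
     * zone id is masked into a power-of-two bucket array.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define CT_LIMIT_HASH_BUCKETS 512    /* assumed; must be a power of two */

    static unsigned int ct_limit_bucket(uint16_t zone)
    {
        return zone & (CT_LIMIT_HASH_BUCKETS - 1);
    }

    int main(void)
    {
        uint16_t zones[] = { 0, 1, 511, 512, 513 };
        for (size_t i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
            printf("zone %u -> bucket %u\n",
                   zones[i], ct_limit_bucket(zones[i]));
        return 0;    /* zones 0 and 512 collide into one bucket */
    }

Collisions are expected: as the ct_limit_get entries show, the bucket is then walked comparing ct_limit->zone against the requested zone.
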
zone               38 net/sched/act_connmark.c 	struct nf_conntrack_zone zone;
zone               72 net/sched/act_connmark.c 	zone.id = ca->zone;
zone               73 net/sched/act_connmark.c 	zone.dir = NF_CT_DEFAULT_ZONE_DIR;
zone               75 net/sched/act_connmark.c 	thash = nf_conntrack_find_get(ca->net, &zone, &tuple);
zone              137 net/sched/act_connmark.c 		ci->zone = parm->zone;
zone              156 net/sched/act_connmark.c 		ci->zone = parm->zone;
zone              183 net/sched/act_connmark.c 	opt.zone = ci->zone;
zone              152 net/sched/act_ct.c 				   u8 family, u16 zone)
zone              174 net/sched/act_ct.c 		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
zone              184 net/sched/act_ct.c 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
zone              420 net/sched/act_ct.c 	err = tcf_ct_handle_fragments(net, skb, family, p->zone);
zone              437 net/sched/act_ct.c 	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
zone              594 net/sched/act_ct.c 	struct nf_conntrack_zone zone;
zone              598 net/sched/act_ct.c 	p->zone = NF_CT_DEFAULT_ZONE_ID;
zone              646 net/sched/act_ct.c 				   &p->zone, TCA_CT_ZONE,
zone              648 net/sched/act_ct.c 				   sizeof(p->zone));
zone              651 net/sched/act_ct.c 	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
zone              654 net/sched/act_ct.c 	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
zone              655 net/sched/act_ct.c 	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
zone              873 net/sched/act_ct.c 				&p->zone, TCA_CT_ZONE,
zone              875 net/sched/act_ct.c 				sizeof(p->zone)))
zone               85 net/sched/act_ctinfo.c 	struct nf_conntrack_zone zone;
zone              120 net/sched/act_ctinfo.c 		zone.id = cp->zone;
zone              121 net/sched/act_ctinfo.c 		zone.dir = NF_CT_DEFAULT_ZONE_DIR;
zone              123 net/sched/act_ctinfo.c 		thash = nf_conntrack_find_get(cp->net, &zone, &tuple);
zone              243 net/sched/act_ctinfo.c 	cp_new->zone = tb[TCA_CTINFO_ZONE] ?
zone              307 net/sched/act_ctinfo.c 	if (nla_put_u16(skb, TCA_CTINFO_ZONE, cp->zone))
zone             3540 net/sched/cls_api.c 			entry->ct.zone = tcf_ct_zone(act);
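
Both the openvswitch and act_ct fragments above fold the zone id into the defrag "user" id, so fragments arriving in different zones are never reassembled together. A kernel-context sketch; ct_defrag_user() is a name invented for illustration.

    /*
     * Sketch of the per-zone defrag separation used above. As I read
     * include/net/ip.h, the ip_defrag_users enum reserves a window of
     * USHRT_MAX values after IP_DEFRAG_CONNTRACK_IN, which is what
     * makes adding a 16-bit zone id safe.
     */
    #include <linux/types.h>
    #include <net/ip.h>

    static enum ip_defrag_users ct_defrag_user(u16 zone)
    {
        /* One distinct defrag user per conntrack zone. */
        return IP_DEFRAG_CONNTRACK_IN + zone;
    }
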
zone              160 sound/soc/soc-jack.c 	struct snd_soc_jack_zone *zone;
zone              162 sound/soc/soc-jack.c 	list_for_each_entry(zone, &jack->jack_zones, list) {
zone              163 sound/soc/soc-jack.c 		if (micbias_voltage >= zone->min_mv &&
zone              164 sound/soc/soc-jack.c 			micbias_voltage < zone->max_mv)
zone              165 sound/soc/soc-jack.c 				return zone->jack_type;
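
The soc-jack entries above scan zones as half-open voltage ranges [min_mv, max_mv), mapping a measured mic-bias voltage to a jack type. A standalone rendering with a plain array in place of the kernel's linked list; the names and the 0 fallback are illustrative.

    /*
     * Sketch of the soc-jack zone scan: first zone whose range
     * contains the measured voltage wins.
     */
    #include <stddef.h>

    struct jack_zone {
        unsigned int min_mv;    /* inclusive */
        unsigned int max_mv;    /* exclusive */
        int jack_type;
    };

    static int zone_to_jack_type(const struct jack_zone *zones, size_t n,
                                 unsigned int micbias_mv)
    {
        for (size_t i = 0; i < n; i++)
            if (micbias_mv >= zones[i].min_mv &&
                micbias_mv < zones[i].max_mv)
                return zones[i].jack_type;
        return 0;    /* no zone matched */
    }
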
zone              201 sound/synth/emux/emux_effect.c 		origp = (unsigned char*)&vp->zone->v.parm + offset;
zone              101 sound/synth/emux/emux_synth.c 		vp->zone = table[i];
zone              102 sound/synth/emux/emux_synth.c 		if (vp->zone->sample)
zone              103 sound/synth/emux/emux_synth.c 			vp->block = vp->zone->sample->block;
zone              499 sound/synth/emux/emux_synth.c 	vp->zone = NULL;
zone              552 sound/synth/emux/emux_synth.c 	vp->reg = vp->zone->v;
zone              512 sound/synth/emux/soundfont.c 	struct snd_sf_zone *zone;
zone              550 sound/synth/emux/soundfont.c 		for (zone = sf->zones; zone; zone = zone->next) {
zone              551 sound/synth/emux/soundfont.c 			if (!zone->mapped &&
zone              552 sound/synth/emux/soundfont.c 			    zone->bank == hdr.bank &&
zone              553 sound/synth/emux/soundfont.c 			    zone->instr == hdr.instr)
zone              582 sound/synth/emux/soundfont.c 		if ((zone = sf_zone_new(sflist, sf)) == NULL) {
zone              587 sound/synth/emux/soundfont.c 		zone->bank = tmpzone.bank;
zone              588 sound/synth/emux/soundfont.c 		zone->instr = tmpzone.instr;
zone              589 sound/synth/emux/soundfont.c 		zone->v = tmpzone.v;
zone              592 sound/synth/emux/soundfont.c 		zone->sample = set_sample(sf, &zone->v);
zone              943 sound/synth/emux/soundfont.c 	struct snd_sf_zone *zone;
zone              999 sound/synth/emux/soundfont.c 	if ((zone = sf_zone_new(sflist, sf)) == NULL) {
zone             1013 sound/synth/emux/soundfont.c 			kfree(zone);
zone             1022 sound/synth/emux/soundfont.c 	zone->v.sample = sample_id; /* the last sample */
zone             1023 sound/synth/emux/soundfont.c 	zone->v.rate_offset = calc_rate_offset(patch.base_freq);
zone             1025 sound/synth/emux/soundfont.c 	zone->v.root = note / 100;
zone             1026 sound/synth/emux/soundfont.c 	zone->v.tune = -(note % 100);
zone             1027 sound/synth/emux/soundfont.c 	zone->v.low = (freq_to_note(patch.low_note) + 99) / 100;
zone             1028 sound/synth/emux/soundfont.c 	zone->v.high = freq_to_note(patch.high_note) / 100;
zone             1030 sound/synth/emux/soundfont.c 	zone->v.pan = (patch.panning + 128) / 2;
zone             1034 sound/synth/emux/soundfont.c 		   (int)patch.base_freq, zone->v.rate_offset,
zone             1035 sound/synth/emux/soundfont.c 		   zone->v.root, zone->v.tune, zone->v.low, zone->v.high);
zone             1059 sound/synth/emux/soundfont.c 		zone->v.parm.volatkhld = 
zone             1062 sound/synth/emux/soundfont.c 		zone->v.parm.voldcysus = (calc_gus_sustain(patch.env_offset[2]) << 8) |
zone             1064 sound/synth/emux/soundfont.c 		zone->v.parm.volrelease = 0x8000 | snd_sf_calc_parm_decay(release);
zone             1065 sound/synth/emux/soundfont.c 		zone->v.attenuation = calc_gus_attenuation(patch.env_offset[0]);
zone             1069 sound/synth/emux/soundfont.c 			   zone->v.parm.volatkhld,
zone             1070 sound/synth/emux/soundfont.c 			   zone->v.parm.voldcysus,
zone             1071 sound/synth/emux/soundfont.c 			   zone->v.parm.volrelease,
zone             1072 sound/synth/emux/soundfont.c 			   zone->v.attenuation);
zone             1078 sound/synth/emux/soundfont.c 		zone->v.parm.volrelease = 0x807f;
zone             1084 sound/synth/emux/soundfont.c 		zone->v.parm.tremfrq = ((patch.tremolo_depth / 2) << 8) | rate;
zone             1089 sound/synth/emux/soundfont.c 		zone->v.parm.fm2frq2 = ((patch.vibrato_depth / 6) << 8) | rate;
zone             1095 sound/synth/emux/soundfont.c 		zone->v.mode = SNDRV_SFNT_MODE_LOOPING;
zone             1097 sound/synth/emux/soundfont.c 		zone->v.mode = 0;
zone             1101 sound/synth/emux/soundfont.c 	zone->bank = 0;
zone             1102 sound/synth/emux/soundfont.c 	zone->instr = patch.instr_no;
zone             1103 sound/synth/emux/soundfont.c 	zone->mapped = 0;
zone             1104 sound/synth/emux/soundfont.c 	zone->v.sf_id = sf->id;
zone             1106 sound/synth/emux/soundfont.c 	zone->sample = set_sample(sf, &zone->v);
zone             1109 sound/synth/emux/soundfont.c 	add_preset(sflist, zone);
zone             1165 sound/synth/emux/soundfont.c 	struct snd_sf_zone *zone;
zone             1168 sound/synth/emux/soundfont.c 	zone = search_first_zone(sflist, cur->bank, cur->instr, cur->v.low);
zone             1169 sound/synth/emux/soundfont.c 	if (zone && zone->v.sf_id != cur->v.sf_id) {
zone             1173 sound/synth/emux/soundfont.c 		for (p = zone; p; p = p->next_zone) {
zone             1179 sound/synth/emux/soundfont.c 		delete_preset(sflist, zone);
zone             1180 sound/synth/emux/soundfont.c 		zone = NULL; /* do not forget to clear this! */
zone             1186 sound/synth/emux/soundfont.c 	cur->next_zone = zone; /* zone link */
zone              110 tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c 	struct zone _5;
zone               96 tools/testing/selftests/net/tcp_mmap.c void hash_zone(void *zone, unsigned int length)
zone              101 tools/testing/selftests/net/tcp_mmap.c 		prefetch(zone + 384);
zone              102 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned long *)zone;
zone              103 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned long *)(zone + sizeof(long));
zone              104 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned long *)(zone + 2*sizeof(long));
zone              105 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned long *)(zone + 3*sizeof(long));
zone              106 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned long *)(zone + 4*sizeof(long));
zone              107 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned long *)(zone + 5*sizeof(long));
zone              108 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned long *)(zone + 6*sizeof(long));
zone              109 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned long *)(zone + 7*sizeof(long));
zone              110 tools/testing/selftests/net/tcp_mmap.c 		zone += 8*sizeof(long);
zone              114 tools/testing/selftests/net/tcp_mmap.c 		temp ^= *(unsigned char *)zone;
zone              115 tools/testing/selftests/net/tcp_mmap.c 		zone += 1;
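
The tcp_mmap listing shows only the lines containing "zone", so the loop heads and accumulator handling of hash_zone() are elided. Below is a reconstructed, self-contained sketch under that reading: XOR-fold eight longs per iteration with a prefetch ahead, then mop up trailing bytes. The local accumulator, the return value, and the prefetch() definition are assumptions; the selftest may keep its state differently.

    /*
     * Reconstructed sketch of hash_zone(); void-pointer arithmetic is
     * a GNU extension, as in the selftest itself. prefetch() assumed
     * to map to __builtin_prefetch.
     */
    #define prefetch(p) __builtin_prefetch(p)

    unsigned long hash_zone(void *zone, unsigned int length)
    {
        unsigned long temp = 0;    /* assumed local accumulator */

        while (length >= 8 * sizeof(long)) {
            prefetch(zone + 384);    /* warm cache lines ahead */
            temp ^= *(unsigned long *)zone;
            temp ^= *(unsigned long *)(zone + sizeof(long));
            temp ^= *(unsigned long *)(zone + 2 * sizeof(long));
            temp ^= *(unsigned long *)(zone + 3 * sizeof(long));
            temp ^= *(unsigned long *)(zone + 4 * sizeof(long));
            temp ^= *(unsigned long *)(zone + 5 * sizeof(long));
            temp ^= *(unsigned long *)(zone + 6 * sizeof(long));
            temp ^= *(unsigned long *)(zone + 7 * sizeof(long));
            zone += 8 * sizeof(long);
            length -= 8 * sizeof(long);
        }
        while (length--) {    /* trailing bytes, one at a time */
            temp ^= *(unsigned char *)zone;
            zone += 1;
        }
        return temp;
    }
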
zone               36 virt/kvm/coalesced_mmio.c 	if (addr < dev->zone.addr)
zone               38 virt/kvm/coalesced_mmio.c 	if (addr + len > dev->zone.addr + dev->zone.size)
zone               89 virt/kvm/coalesced_mmio.c 	ring->coalesced_mmio[insert].pio = dev->zone.pio;
zone              142 virt/kvm/coalesced_mmio.c 					 struct kvm_coalesced_mmio_zone *zone)
zone              147 virt/kvm/coalesced_mmio.c 	if (zone->pio != 1 && zone->pio != 0)
zone              157 virt/kvm/coalesced_mmio.c 	dev->zone = *zone;
zone              161 virt/kvm/coalesced_mmio.c 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
zone              162 virt/kvm/coalesced_mmio.c 				zone->addr, zone->size, &dev->dev);
zone              178 virt/kvm/coalesced_mmio.c 					   struct kvm_coalesced_mmio_zone *zone)
zone              182 virt/kvm/coalesced_mmio.c 	if (zone->pio != 1 && zone->pio != 0)
zone              188 virt/kvm/coalesced_mmio.c 		if (zone->pio == dev->zone.pio &&
zone              189 virt/kvm/coalesced_mmio.c 		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
zone              191 virt/kvm/coalesced_mmio.c 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
zone               22 virt/kvm/coalesced_mmio.h 	struct kvm_coalesced_mmio_zone zone;
zone               28 virt/kvm/coalesced_mmio.h 					struct kvm_coalesced_mmio_zone *zone);
zone               30 virt/kvm/coalesced_mmio.h 					struct kvm_coalesced_mmio_zone *zone);
zone             3403 virt/kvm/kvm_main.c 		struct kvm_coalesced_mmio_zone zone;
zone             3406 virt/kvm/kvm_main.c 		if (copy_from_user(&zone, argp, sizeof(zone)))
zone             3408 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
zone             3412 virt/kvm/kvm_main.c 		struct kvm_coalesced_mmio_zone zone;
zone             3415 virt/kvm/kvm_main.c 		if (copy_from_user(&zone, argp, sizeof(zone)))
zone             3417 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
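
The kvm_main.c entries close the loop from userspace: a kvm_coalesced_mmio_zone is copied in from the ioctl argument and registered against the VM. A userspace-side sketch, assuming the uapi layout implied by the .pio field visible above; the address and size are placeholders.

    /*
     * Sketch: register a coalesced MMIO zone on a KVM VM fd. Writes
     * landing in this guest-physical range are batched into the
     * coalesced ring instead of exiting per access.
     */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int register_mmio_zone(int vm_fd)
    {
        struct kvm_coalesced_mmio_zone zone = {
            .addr = 0xfed00000,    /* example guest-physical base */
            .size = 0x1000,        /* one page */
            .pio  = 0,             /* MMIO, not port I/O */
        };

        return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

Unregistration is symmetric (KVM_UNREGISTER_COALESCED_MMIO), matching the register/unregister pair at the end of the listing.
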