group_cnt 408 drivers/md/raid5-cache.c (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
group_cnt 992 drivers/md/raid5.c should_defer = conf->batch_bio_dispatch && conf->group_cnt;
group_cnt 5347 drivers/md/raid5.c for (i = 0; i < conf->group_cnt; i++) {
group_cnt 6600 drivers/md/raid5.c int *group_cnt,
group_cnt 6610 drivers/md/raid5.c int group_cnt, worker_cnt_per_group;
group_cnt 6634 drivers/md/raid5.c &group_cnt, &worker_cnt_per_group,
group_cnt 6638 drivers/md/raid5.c conf->group_cnt = group_cnt;
group_cnt 6676 drivers/md/raid5.c int *group_cnt,
group_cnt 6686 drivers/md/raid5.c *group_cnt = 0;
group_cnt 6690 drivers/md/raid5.c *group_cnt = num_possible_nodes();
group_cnt 6692 drivers/md/raid5.c workers = kcalloc(size, *group_cnt, GFP_NOIO);
group_cnt 6693 drivers/md/raid5.c *worker_groups = kcalloc(*group_cnt, sizeof(struct r5worker_group),
group_cnt 6701 drivers/md/raid5.c for (i = 0; i < *group_cnt; i++) {
group_cnt 6885 drivers/md/raid5.c int group_cnt, worker_cnt_per_group;
group_cnt 6931 drivers/md/raid5.c if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
group_cnt 6933 drivers/md/raid5.c conf->group_cnt = group_cnt;
group_cnt 679 drivers/md/raid5.h int group_cnt;
group_cnt 881 drivers/pinctrl/sprd/pinctrl-sprd.c unsigned int group_cnt, cnt;
group_cnt 883 drivers/pinctrl/sprd/pinctrl-sprd.c group_cnt = of_get_child_count(np);
group_cnt 888 drivers/pinctrl/sprd/pinctrl-sprd.c group_cnt += cnt;
group_cnt 891 drivers/pinctrl/sprd/pinctrl-sprd.c return group_cnt;
group_cnt 2535 mm/percpu.c static int group_cnt[NR_CPUS] __initdata;
group_cnt 2547 mm/percpu.c memset(group_cnt, 0, sizeof(group_cnt));
group_cnt 2585 mm/percpu.c group_cnt[group]++;
group_cnt 2601 mm/percpu.c int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
group_cnt 2603 mm/percpu.c wasted += this_allocs * upa - group_cnt[group];
group_cnt 2624 mm/percpu.c nr_units += roundup(group_cnt[group], upa);
group_cnt 2633 mm/percpu.c cpu_map += roundup(group_cnt[group], upa);
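
Taken together, the drivers/md/raid5.c hits at 6676-6701 show where group_cnt gets its value: alloc_thread_groups() leaves *group_cnt at 0 when no workers are requested (which is why the test at line 992 can gate bio deferral on conf->group_cnt alone), and otherwise creates one worker group per possible NUMA node. Below is a minimal C sketch of that path reconstructed from the fragments above; the error handling, the size computation, and the wiring inside the per-group loop are assumptions for illustration, not verbatim kernel code.

static int alloc_thread_groups(struct r5conf *conf, int cnt,
			       int *group_cnt, int *worker_cnt_per_group,
			       struct r5worker_group **worker_groups)
{
	size_t size;
	struct r5worker *workers;
	int i, j;

	*worker_cnt_per_group = cnt;
	if (cnt == 0) {
		/* no worker threads: the raid5d thread handles stripes itself */
		*group_cnt = 0;
		*worker_groups = NULL;
		return 0;
	}

	/* one worker group per possible NUMA node (line 6690) */
	*group_cnt = num_possible_nodes();

	size = sizeof(struct r5worker) * cnt;	/* assumed definition of size */
	workers = kcalloc(size, *group_cnt, GFP_NOIO);
	*worker_groups = kcalloc(*group_cnt, sizeof(struct r5worker_group),
				 GFP_NOIO);
	if (!workers || !*worker_groups) {
		kfree(workers);
		kfree(*worker_groups);
		return -ENOMEM;
	}

	for (i = 0; i < *group_cnt; i++) {
		struct r5worker_group *group = &(*worker_groups)[i];

		group->conf = conf;
		group->workers = workers + i * cnt;	/* assumed slicing */
		for (j = 0; j < cnt; j++)
			group->workers[j].group = group;
	}
	return 0;
}

setup_conf() (lines 6885-6933) calls this with cnt == 0 at array creation and copies the result into conf->group_cnt (the field declared at raid5.h:679); the fragments at 6610-6638 repeat the same pattern from what appears to be the sysfs store handler that lets the worker-group count be changed at runtime.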
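
The drivers/pinctrl/sprd/pinctrl-sprd.c hits at 881-891 form a small device-tree counting helper: every direct child of the controller node counts as one pin group, and each child's own subnodes are added on top. A sketch of the full function, assuming the for_each_child_of_node() iteration that the accumulation at line 888 implies (the function name used here is hypothetical):

static unsigned int sprd_pinctrl_count_groups(struct device_node *np)
{
	unsigned int group_cnt, cnt;
	struct device_node *child;

	/* each direct child node is one pin group (line 883) */
	group_cnt = of_get_child_count(np);

	/* nested subnodes contribute additional groups (line 888) */
	for_each_child_of_node(np, child) {
		cnt = of_get_child_count(child);
		group_cnt += cnt;
	}

	return group_cnt;
}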
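
In mm/percpu.c, group_cnt is not a struct field but an __initdata scratch array (line 2535) used while building the first-chunk allocation layout: after CPUs are binned into proximity groups (group_cnt[group]++ at line 2585), each candidate units-per-allocation value upa is charged for the units lost to rounding every group's CPU count up to whole allocations (lines 2601-2603), and the cheapest candidate wins. A sketch of that accounting step, with the loop bounds, the enclosing upa search, and the variable declarations assumed; in this region of the file the enclosing function is pcpu_build_alloc_info():

	/* charge each group for the slack its rounding produces */
	wasted = 0;
	for (group = 0; group < nr_groups; group++) {
		/* whole allocations needed to cover this group's CPUs */
		int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);

		wasted += this_allocs * upa - group_cnt[group];
	}

Once a upa is chosen, the same rounding fixes the layout: nr_units and the cpu_map cursor both advance by roundup(group_cnt[group], upa) (lines 2624 and 2633), so every group occupies a whole number of units and the leftover slots map to no CPU.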