covered            42 arch/x86/kernel/cpu/mce/severity.c 	unsigned char covered;
covered           325 arch/x86/kernel/cpu/mce/severity.c 		s->covered = 1;
covered           367 arch/x86/kernel/cpu/mce/severity.c 	seq_printf(f, "%d\t%s\n", ser->covered, ser->msg);
covered           389 arch/x86/kernel/cpu/mce/severity.c 		severities[i].covered = 0;
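
From the four severity.c hits above, covered looks like a per-rule "has this rule ever matched" flag: set to 1 when a rule fires (line 325), dumped next to the rule's message (line 367), and cleared on reset (line 389). Below is a minimal userspace sketch of that pattern; the rule table, the bit values, and the function names are illustrative stand-ins, not the kernel's own.

#include <stdio.h>

/* Illustrative stand-in for a severity rule table: each rule carries
 * a "covered" flag recording whether the rule has ever matched, for
 * later reporting (cf. the dump at severity.c:367) and resetting
 * (severity.c:389). */
struct severity_rule {
	unsigned long long mask;   /* which status bits the rule tests */
	unsigned long long value;  /* required bit pattern */
	const char *msg;
	unsigned char covered;     /* set once the rule has matched */
};

static struct severity_rule rules[] = {
	{ 0x4, 0x4, "uncorrected error", 0 },
	{ 0x1, 0x1, "corrected error",   0 },
	{ 0x0, 0x0, "no error",          0 },
};

static const char *classify(unsigned long long status)
{
	for (size_t i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		struct severity_rule *s = &rules[i];

		if ((status & s->mask) == s->value) {
			s->covered = 1;            /* cf. severity.c:325 */
			return s->msg;
		}
	}
	return "unknown";
}

static void dump_coverage(FILE *f)
{
	for (size_t i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
		fprintf(f, "%d\t%s\n", rules[i].covered, rules[i].msg);
}

int main(void)
{
	classify(0x5);
	classify(0x0);
	dump_coverage(stdout);    /* shows which rules were exercised */
	return 0;
}
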
covered           937 drivers/hv/hv_balloon.c 	int covered;
covered           944 drivers/hv/hv_balloon.c 		covered = pfn_covered(pg_start, pfn_cnt);
covered           945 drivers/hv/hv_balloon.c 		if (covered < 0)
covered           948 drivers/hv/hv_balloon.c 		if (covered)
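
The hv_balloon.c hits show a tri-state return convention: pfn_covered() yields a negative value on error, 0 when the page range is not covered by an existing region, and a positive value when it is, and the caller checks the two cases separately (lines 945 and 948). A small sketch of that convention follows; the region bounds and the rejection case are invented for the sketch, standing in for the driver's real hot-add bookkeeping.

#include <stdio.h>

/* Tri-state return in the style of hv_balloon.c's pfn_covered():
 * negative errno on failure, 0 if the PFN range is not covered by
 * an existing region, positive if it is. */
static int pfn_covered(unsigned long pg_start, unsigned long pfn_cnt)
{
	static const unsigned long region_start = 0x1000;
	static const unsigned long region_end   = 0x2000;

	if (pfn_cnt == 0)
		return -22;        /* -EINVAL: malformed request */
	if (pg_start >= region_start && pg_start + pfn_cnt <= region_end)
		return 1;          /* fully covered by the region */
	return 0;                  /* not covered */
}

int main(void)
{
	unsigned long pg_start = 0x1800, pfn_cnt = 0x100;
	int covered = pfn_covered(pg_start, pfn_cnt);

	if (covered < 0) {         /* cf. hv_balloon.c:945 */
		fprintf(stderr, "request rejected: %d\n", covered);
		return 1;
	}
	if (covered)               /* cf. hv_balloon.c:948 */
		puts("range already covered; reuse the existing region");
	else
		puts("range not covered; set up a new region");
	return 0;
}
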
covered           931 kernel/sched/topology.c 	struct cpumask *covered = sched_domains_tmpmask;
covered           936 kernel/sched/topology.c 	cpumask_clear(covered);
covered           941 kernel/sched/topology.c 		if (cpumask_test_cpu(i, covered))
covered           964 kernel/sched/topology.c 		cpumask_or(covered, covered, sg_span);
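
The topology.c hits at lines 931-964 are the "covered" accumulator idiom for partitioning a CPU span into groups: clear a scratch mask, skip any CPU already claimed by an earlier group, and OR each new group's span into covered so every CPU lands in exactly one group. A userspace sketch of the idiom, with a plain bitmask standing in for struct cpumask and a made-up group_span() that pairs adjacent CPUs:

#include <stdio.h>

typedef unsigned long mask_t;   /* stand-in for struct cpumask */

/* Hypothetical per-CPU group span: each group is the pair of CPUs
 * sharing all but the lowest bit of the CPU id ({0,1}, {2,3}, ...). */
static mask_t group_span(int cpu)
{
	return 3UL << (cpu & ~1);
}

int main(void)
{
	mask_t span = 0xff;    /* domain covers CPUs 0..7 */
	mask_t covered = 0;    /* cf. cpumask_clear(covered) */

	for (int i = 0; i < 8; i++) {
		if (!(span & (1UL << i)))
			continue;
		if (covered & (1UL << i))   /* cpumask_test_cpu(i, covered) */
			continue;

		mask_t sg_span = group_span(i);
		printf("group led by CPU %d: span 0x%lx\n", i, sg_span);
		covered |= sg_span;  /* cpumask_or(covered, covered, sg_span) */
	}
	return 0;
}
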
covered          1107 kernel/sched/topology.c 	struct cpumask *covered;
covered          1111 kernel/sched/topology.c 	covered = sched_domains_tmpmask;
covered          1113 kernel/sched/topology.c 	cpumask_clear(covered);
covered          1118 kernel/sched/topology.c 		if (cpumask_test_cpu(i, covered))
covered          1123 kernel/sched/topology.c 		cpumask_or(covered, covered, sched_group_span(sg));
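
The second topology.c cluster (lines 1107-1123) appears to be the same idiom, with sched_group_span(sg) supplying the span that gets OR-ed into covered, so the sketch above covers both call sites.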