groupc            241 kernel/sched/psi.c 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
groupc            251 kernel/sched/psi.c 		seq = read_seqcount_begin(&groupc->seq);
groupc            253 kernel/sched/psi.c 		memcpy(times, groupc->times, sizeof(groupc->times));
groupc            254 kernel/sched/psi.c 		state_mask = groupc->state_mask;
groupc            255 kernel/sched/psi.c 		state_start = groupc->state_start;
groupc            256 kernel/sched/psi.c 	} while (read_seqcount_retry(&groupc->seq, seq));
groupc            273 kernel/sched/psi.c 		delta = times[s] - groupc->times_prev[aggregator][s];
groupc            274 kernel/sched/psi.c 		groupc->times_prev[aggregator][s] = times[s];
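
The hits at 241-274 land in the read-side snapshot helper (get_recent_times() in this version of psi.c): the aggregator copies a CPU's live counters under a seqcount read section, then diffs them against its saved times_prev[aggregator][] copy so each aggregator (averaging vs. polling) only sees the growth since its own last visit. A minimal sketch of that retry-read pattern, with a cut-down struct standing in for psi_group_cpu (names here are illustrative, not from psi.c):

	#include <linux/seqlock.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Cut-down stand-in for psi_group_cpu: a seqcount guarding a set of
	 * monotonically growing time counters (names are illustrative). */
	struct pcpu_times {
		seqcount_t seq;
		u64 times[4];
		u32 state_mask;
	};

	/* Snapshot the counters without blocking the hot-path writer: copy
	 * them under a read section and retry if a write section raced us. */
	static void snapshot_times(struct pcpu_times *pt, u64 *snap, u32 *mask)
	{
		unsigned int seq;

		do {
			seq = read_seqcount_begin(&pt->seq);
			memcpy(snap, pt->times, sizeof(pt->times));
			*mask = pt->state_mask;
		} while (read_seqcount_retry(&pt->seq, seq));
	}

The subtraction against times_prev[] at 273-274 then turns the snapshot into a per-period delta for the caller.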
groupc            626 kernel/sched/psi.c static void record_times(struct psi_group_cpu *groupc, int cpu,
groupc            633 kernel/sched/psi.c 	delta = now - groupc->state_start;
groupc            634 kernel/sched/psi.c 	groupc->state_start = now;
groupc            636 kernel/sched/psi.c 	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
groupc            637 kernel/sched/psi.c 		groupc->times[PSI_IO_SOME] += delta;
groupc            638 kernel/sched/psi.c 		if (groupc->state_mask & (1 << PSI_IO_FULL))
groupc            639 kernel/sched/psi.c 			groupc->times[PSI_IO_FULL] += delta;
groupc            642 kernel/sched/psi.c 	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
groupc            643 kernel/sched/psi.c 		groupc->times[PSI_MEM_SOME] += delta;
groupc            644 kernel/sched/psi.c 		if (groupc->state_mask & (1 << PSI_MEM_FULL))
groupc            645 kernel/sched/psi.c 			groupc->times[PSI_MEM_FULL] += delta;
groupc            661 kernel/sched/psi.c 			groupc->times[PSI_MEM_FULL] += sample;
groupc            665 kernel/sched/psi.c 	if (groupc->state_mask & (1 << PSI_CPU_SOME))
groupc            666 kernel/sched/psi.c 		groupc->times[PSI_CPU_SOME] += delta;
groupc            668 kernel/sched/psi.c 	if (groupc->state_mask & (1 << PSI_NONIDLE))
groupc            669 kernel/sched/psi.c 		groupc->times[PSI_NONIDLE] += delta;
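
The 626-669 hits are record_times(): it charges the wall-clock time since groupc->state_start to every state bit currently set in groupc->state_mask (FULL only underneath its SOME bit), resets state_start, and on the memstall-tick path adds the clamped sample at line 661 (covered after the 797-802 hits below). The basic accounting rule can be sketched as a hypothetical helper, not the psi.c function itself:

	#include <linux/types.h>

	/* Charge the time since the last state change to every pressure
	 * state set in @state_mask. psi.c open-codes this per state so that
	 * FULL time is only counted while the matching SOME bit is set. */
	static void charge_elapsed(u64 *times, unsigned int nr_states,
				   u32 state_mask, u64 *state_start, u64 now)
	{
		u64 delta = now - *state_start;
		unsigned int s;

		*state_start = now;
		for (s = 0; s < nr_states; s++) {
			if (state_mask & (1U << s))
				times[s] += delta;
		}
	}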
groupc            675 kernel/sched/psi.c 	struct psi_group_cpu *groupc;
groupc            680 kernel/sched/psi.c 	groupc = per_cpu_ptr(group->pcpu, cpu);
groupc            690 kernel/sched/psi.c 	write_seqcount_begin(&groupc->seq);
groupc            692 kernel/sched/psi.c 	record_times(groupc, cpu, false);
groupc            697 kernel/sched/psi.c 		if (groupc->tasks[t] == 0 && !psi_bug) {
groupc            699 kernel/sched/psi.c 					cpu, t, groupc->tasks[0],
groupc            700 kernel/sched/psi.c 					groupc->tasks[1], groupc->tasks[2],
groupc            704 kernel/sched/psi.c 		groupc->tasks[t]--;
groupc            709 kernel/sched/psi.c 			groupc->tasks[t]++;
groupc            713 kernel/sched/psi.c 		if (test_state(groupc->tasks, s))
groupc            716 kernel/sched/psi.c 	groupc->state_mask = state_mask;
groupc            718 kernel/sched/psi.c 	write_seqcount_end(&groupc->seq);
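
The 675-718 hits are the write side, psi_group_change(): inside one write_seqcount section it flushes accrued time via record_times(), decrements and increments the per-state task counters (with a one-shot warning gated by psi_bug if a counter would underflow, 697-704), and rebuilds state_mask from the new counts with test_state(). A compressed sketch of that bracket, under hypothetical names, shows why the reader above never sees a mask that doesn't match the times:

	#include <linux/seqlock.h>
	#include <linux/types.h>

	/* Hypothetical per-CPU record mirroring the fields the reader snapshots. */
	struct pcpu_state {
		seqcount_t seq;
		u64 times[4];
		u64 state_start;
		u32 state_mask;
	};

	/* Time flush and mask change land in one write section; writers for
	 * a given CPU are serialized by the scheduler context this runs in. */
	static void pcpu_change_state(struct pcpu_state *st, u64 now, u32 new_mask)
	{
		unsigned int s;
		u64 delta;

		write_seqcount_begin(&st->seq);

		/* Flush time accrued under the old mask (record_times()'s job). */
		delta = now - st->state_start;
		st->state_start = now;
		for (s = 0; s < 4; s++)
			if (st->state_mask & (1U << s))
				st->times[s] += delta;

		/* Install the mask recomputed from the task counts (test_state()). */
		st->state_mask = new_mask;

		write_seqcount_end(&st->seq);
	}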
groupc            797 kernel/sched/psi.c 		struct psi_group_cpu *groupc;
groupc            799 kernel/sched/psi.c 		groupc = per_cpu_ptr(group->pcpu, cpu);
groupc            800 kernel/sched/psi.c 		write_seqcount_begin(&groupc->seq);
groupc            801 kernel/sched/psi.c 		record_times(groupc, cpu, true);
groupc            802 kernel/sched/psi.c 		write_seqcount_end(&groupc->seq);
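
The hits at 797-802 are the tick path (psi_memstall_tick() in this version): when the scheduler tick finds the CPU's current task in a memory stall, it reruns record_times() with its last argument true inside another write_seqcount bracket on that CPU's groupc, and that flag is what feeds the PSI_MEM_FULL sample at line 661. A small illustration of the clamp applied there (hypothetical helper name):

	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Credit at most one tick of FULL time since the last state change,
	 * so a CPU busy reclaiming accrues PSI_MEM_FULL even while other
	 * tasks are runnable and the FULL bit isn't set in state_mask. */
	static u32 memstall_tick_sample(u64 now, u64 state_start)
	{
		u32 delta = now - state_start;

		return min(delta, (u32)jiffies_to_nsecs(1));
	}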