Lines matching refs:perf (uses of struct acpi_processor_performance in the acpi-cpufreq driver)

202 struct acpi_processor_performance *perf; in extract_io() local
205 perf = data->acpi_data; in extract_io()
207 for (i = 0; i < perf->state_count; i++) { in extract_io()
208 if (value == perf->states[i].status) in extract_io()
217 struct acpi_processor_performance *perf; in extract_msr() local
224 perf = data->acpi_data; in extract_msr()
227 if (msr == perf->states[pos->driver_data].status) in extract_msr()
332 struct acpi_processor_performance *perf; in get_cur_val() local
349 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; in get_cur_val()
350 cmd.addr.io.port = perf->control_register.address; in get_cur_val()
351 cmd.addr.io.bit_width = perf->control_register.bit_width; in get_cur_val()
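
In get_cur_val() the perf data of the first CPU in the affected mask supplies the register to read, since every CPU in a hardware coordination domain shares the same control register. A small sketch of just that addressing step, with the ACPI register description and the driver's command structure reduced to plain structs (all names here are illustrative):

#include <stdint.h>

struct reg_desc {
	uint64_t address;    /* perf->control_register.address */
	uint8_t  bit_width;  /* perf->control_register.bit_width */
};

struct io_cmd {
	uint64_t port;
	uint8_t  bit_width;
};

/* Model of the addressing step: copy the ACPI-described control
 * register of the domain's first CPU into the I/O command. */
static void setup_status_read(struct io_cmd *cmd, const struct reg_desc *control_reg)
{
	cmd->port = control_reg->address;
	cmd->bit_width = control_reg->bit_width;
}
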
412 struct acpi_processor_performance *perf; in acpi_cpufreq_target() local
422 perf = data->acpi_data; in acpi_cpufreq_target()
424 if (perf->state == next_perf_state) { in acpi_cpufreq_target()
440 cmd.val = (u32) perf->states[next_perf_state].control; in acpi_cpufreq_target()
445 cmd.val = (u32) perf->states[next_perf_state].control; in acpi_cpufreq_target()
449 cmd.addr.io.port = perf->control_register.address; in acpi_cpufreq_target()
450 cmd.addr.io.bit_width = perf->control_register.bit_width; in acpi_cpufreq_target()
451 cmd.val = (u32) perf->states[next_perf_state].control; in acpi_cpufreq_target()
476 perf->state = next_perf_state; in acpi_cpufreq_target()
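
acpi_cpufreq_target() returns early when the CPU already sits in the requested P-state, takes the control value for the new state from perf->states[next_perf_state].control (the MSR, fixed-hardware and SYSTEM_IO branches all use the same field), programs it through the control register, and only then records next_perf_state in perf->state. A compressed user-space model of that control flow, with the register write stubbed out and the structures reduced to the fields these lines use:

#include <stdbool.h>
#include <stdint.h>

struct pstate {
	uint64_t control;          /* value that selects this P-state */
};

struct perf_ctx {
	unsigned int state;        /* currently programmed P-state index */
	struct pstate states[8];
	uint64_t control_port;     /* perf->control_register.address */
	uint8_t  control_bits;     /* perf->control_register.bit_width */
};

/* Stand-in for the real port or MSR write; illustrative only. */
static bool write_control(uint64_t port, uint8_t bits, uint64_t val)
{
	(void)port; (void)bits; (void)val;
	return true;
}

/* Compressed model of acpi_cpufreq_target(): skip the switch if the
 * CPU is already in the requested state, program the control value
 * for the new state, and commit perf->state only on success. */
static int set_pstate(struct perf_ctx *perf, unsigned int next)
{
	if (perf->state == next)
		return 0;

	if (!write_control(perf->control_port, perf->control_bits,
			   perf->states[next].control))
		return -1;

	perf->state = next;
	return 0;
}
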
485 struct acpi_processor_performance *perf = data->acpi_data; in acpi_cpufreq_guess_freq() local
491 unsigned long freqn = perf->states[0].core_frequency * 1000; in acpi_cpufreq_guess_freq()
493 for (i = 0; i < (perf->state_count-1); i++) { in acpi_cpufreq_guess_freq()
495 freqn = perf->states[i+1].core_frequency * 1000; in acpi_cpufreq_guess_freq()
497 perf->state = i; in acpi_cpufreq_guess_freq()
501 perf->state = perf->state_count-1; in acpi_cpufreq_guess_freq()
505 perf->state = 0; in acpi_cpufreq_guess_freq()
506 return perf->states[0].core_frequency * 1000; in acpi_cpufreq_guess_freq()
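
The guess-frequency fragments walk the _PSS table from the highest state down, comparing the measured cpu_khz against the midpoint of each adjacent pair of frequencies (core_frequency is in MHz, hence the * 1000), and fall back to P0 when no cpu_khz measurement is available. A self-contained model of that search, with the per-CPU data reduced to an array of MHz values (names and return convention are as these lines suggest, not a verbatim copy):

/* Reduced model of acpi_cpufreq_guess_freq(): pick the table entry
 * closest to the measured cpu_khz by checking whether cpu_khz lies
 * above the midpoint between state i and state i+1.  Entries are
 * core_frequency values in MHz, fastest first, as in _PSS. */
static unsigned long guess_freq(const unsigned int *core_mhz,
				unsigned int count,
				unsigned int *state,
				unsigned long cpu_khz)
{
	if (cpu_khz) {
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = core_mhz[0] * 1000UL;

		for (i = 0; i < count - 1; i++) {
			freq = freqn;
			freqn = core_mhz[i + 1] * 1000UL;
			if (2 * cpu_khz > freqn + freq) {
				*state = i;        /* cpu_khz is nearer state i */
				return freq;
			}
		}
		*state = count - 1;                /* slower than every midpoint */
		return freqn;
	}

	/* No measured cpu_khz: assume the CPU sits in the highest state P0. */
	*state = 0;
	return core_mhz[0] * 1000UL;
}
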
651 struct acpi_processor_performance *perf; in acpi_cpufreq_cpu_init() local
685 perf = data->acpi_data; in acpi_cpufreq_cpu_init()
686 policy->shared_type = perf->shared_type; in acpi_cpufreq_cpu_init()
694 cpumask_copy(policy->cpus, perf->shared_cpu_map); in acpi_cpufreq_cpu_init()
696 cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map); in acpi_cpufreq_cpu_init()
715 if (perf->state_count <= 1) { in acpi_cpufreq_cpu_init()
721 if (perf->control_register.space_id != perf->status_register.space_id) { in acpi_cpufreq_cpu_init()
726 switch (perf->control_register.space_id) { in acpi_cpufreq_cpu_init()
751 (u32) (perf->control_register.space_id)); in acpi_cpufreq_cpu_init()
757 (perf->state_count+1), GFP_KERNEL); in acpi_cpufreq_cpu_init()
765 for (i = 0; i < perf->state_count; i++) { in acpi_cpufreq_cpu_init()
766 if ((perf->states[i].transition_latency * 1000) > in acpi_cpufreq_cpu_init()
769 perf->states[i].transition_latency * 1000; in acpi_cpufreq_cpu_init()
773 if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && in acpi_cpufreq_cpu_init()
781 for (i = 0; i < perf->state_count; i++) { in acpi_cpufreq_cpu_init()
782 if (i > 0 && perf->states[i].core_frequency >= in acpi_cpufreq_cpu_init()
788 perf->states[i].core_frequency * 1000; in acpi_cpufreq_cpu_init()
792 perf->state = 0; in acpi_cpufreq_cpu_init()
798 if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) in acpi_cpufreq_cpu_init()
801 switch (perf->control_register.space_id) { in acpi_cpufreq_cpu_init()
822 for (i = 0; i < perf->state_count; i++) in acpi_cpufreq_cpu_init()
824 (i == perf->state ? '*' : ' '), i, in acpi_cpufreq_cpu_init()
825 (u32) perf->states[i].core_frequency, in acpi_cpufreq_cpu_init()
826 (u32) perf->states[i].power, in acpi_cpufreq_cpu_init()
827 (u32) perf->states[i].transition_latency); in acpi_cpufreq_cpu_init()
840 acpi_processor_unregister_performance(perf, cpu); in acpi_cpufreq_cpu_init()
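
The cpu_init lines show the cpufreq policy being populated from the ACPI data: the coordination type and shared CPU mask are copied from perf, the driver gives up when there are fewer than two P-states or when the control and status registers live in different address spaces, the worst-case _PSS transition latency (microseconds) is converted to nanoseconds, and a kHz frequency table with state_count + 1 entries (the extra one is the terminator) is built while dropping entries whose frequency does not keep decreasing; perf->state starts at 0, and the performance object is unregistered again on failure. A sketch of the table-building part alone, under the same reduced-model assumptions as the earlier sketches (the sentinel stands in for the kernel's table terminator):

#include <stdlib.h>

struct pss_state {
	unsigned int core_frequency;      /* MHz */
	unsigned int transition_latency;  /* microseconds */
};

struct freq_entry {
	unsigned int driver_data;   /* index back into the _PSS table */
	unsigned int frequency;     /* kHz, or the terminator marker */
};

#define FREQ_TABLE_END (~0u)

/* Model of the table-building loop in acpi_cpufreq_cpu_init():
 * track the worst-case latency in ns, emit one kHz entry per state,
 * skip entries whose frequency does not drop below the previously
 * accepted one, and terminate with a sentinel (hence state_count + 1
 * allocated entries). */
static struct freq_entry *build_freq_table(const struct pss_state *states,
					   unsigned int state_count,
					   unsigned int *latency_ns)
{
	struct freq_entry *table;
	unsigned int i, valid = 0;

	table = calloc(state_count + 1, sizeof(*table));
	if (!table)
		return NULL;

	*latency_ns = 0;
	for (i = 0; i < state_count; i++) {
		if (states[i].transition_latency * 1000 > *latency_ns)
			*latency_ns = states[i].transition_latency * 1000;

		if (i > 0 && states[i].core_frequency * 1000 >=
		    table[valid - 1].frequency)
			continue;

		table[valid].driver_data = i;
		table[valid].frequency = states[i].core_frequency * 1000;
		valid++;
	}
	table[valid].frequency = FREQ_TABLE_END;
	return table;
}
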