This source file includes the following definitions:
- show_feedback_ctrs
- check_pcc_chan
- send_pcc_cmd
- cppc_chan_tx_done
- acpi_get_psd
- acpi_get_psd_map
- register_pcc_channel
- cpc_ffh_supported
- pcc_data_alloc
- is_cppc_supported
- acpi_cppc_processor_probe
- acpi_cppc_processor_exit
- cpc_read_ffh
- cpc_write_ffh
- cpc_read
- cpc_write
- cppc_get_desired_perf
- cppc_get_perf_caps
- cppc_get_perf_ctrs
- cppc_set_perf
- cppc_get_transition_latency
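/*
 * CPPC (Collaborative Processor Performance Control) methods used
 * by CPUfreq drivers.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described
 * in the ACPI v5.0+ specification. It consists of a list of registers,
 * which may be memory mapped or hardware registers, and may also
 * include some static integer values.
 */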
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

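	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical usecases (e.g. cppc_set_perf), a read
	 * lock is taken to allow multiple CPUs to batch requests while the
	 * channel still belongs to the OSPM. A write lock is taken before
	 * transferring channel ownership to the platform via the doorbell.
	 * For non-critical paths (init), the write lock is taken for
	 * exclusive access.
	 */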
	struct rw_semaphore pcc_lock;

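	/* Wait queue for CPUs whose requests were batched */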
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

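/* Array to represent the PCC channel per subspace ID */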
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

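/* The cpu_pcc_subspace_idx contains per CPU subspace ID */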
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

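/*
 * The cpc_desc structure contains the ACPI register details as described
 * in the per-CPU _CPC tables. The details include the type of register
 * (e.g. PCC, System IO, FFH etc.) and destination addresses which let us
 * read/write the CPU's performance capabilities and requests.
 */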
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

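/* pcc mapped address + header size + offset within PCC subspace */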
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))

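/* Check if a CPC register is in PCC */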
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
			 (cpc)->cpc_entry.reg.space_id ==		\
			 ACPI_ADR_SPACE_PLATFORM_COMM)

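/* Evaluates to True if reg is a NULL register descriptor */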
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
			  (reg)->address == 0 &&			\
			  (reg)->bit_width == 0 &&			\
			  (reg)->bit_offset == 0 &&			\
			  (reg)->access_width == 0)

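/* Evaluates to True if an optional CPC field is supported */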
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
			    !!(cpc)->cpc_entry.int_value :		\
			    !IS_NULL_REG(&(cpc)->cpc_entry.reg))

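/*
 * Arbitrary number of retries in case the remote processor is slow to
 * respond to PCC commands; kept high enough to also cover emulators,
 * where the processors run painfully slowly.
 */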
#define NUM_RETRIES 500ULL

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			 struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
					  struct attribute *attr, char *buf) \
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				 (u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

static ssize_t show_feedback_ctrs(struct kobject *kobj,
				  struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			 fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

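	/*
	 * Poll the PCC status register every 3us (delay_us) for a maximum of
	 * deadline_us (timeout_us) until the PCC command complete bit is set.
	 */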
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

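/*
 * This function transfers the ownership of the PCC to the platform
 * and waits for the PCC command to complete.
 */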
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

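	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */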
	if (cmd == CMD_READ) {
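		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */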
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = false;

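	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the
	 * completion of a command before issuing the next command,
	 * in microseconds."
	 */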
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

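	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel
	 * can support, reported in commands per minute. 0 indicates no
	 * limitation."
	 *
	 * This parameter should ideally be zero or large enough to handle the
	 * maximum number of requests that all cores in the system can
	 * collectively generate. If it is not, per the spec, simply don't send
	 * the request to the platform after hitting the MPAR limit in any
	 * 60s window.
	 */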
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

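	/* Write to the shared comm region */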
	writew_relaxed(cmd, &generic_comm_base->command);

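	/* Flip CMD COMPLETE bit */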
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

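	/* Ring doorbell */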
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

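	/* Wait for completion and check for PCC error bit */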
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

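/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */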
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

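	/*
	 * Now that we have _PSD data from all CPUs, let's set up
	 * P-state domain info.
	 */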
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

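		/* Validate the Domain info */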
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

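			/* Here i and j are in the same domain */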
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

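		/* Assume no coordination on any error parsing domain info */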
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

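		/*
		 * The PCC mailbox controller driver should have parsed the
		 * PCCT (global table of all PCC channels) and stored pointers
		 * to them in con_priv, the private data of each channel.
		 */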
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

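		/*
		 * cppc_ss->latency is just a nominal value. In reality the
		 * remote processor could be much slower to reply, so add an
		 * arbitrary amount of wait on top of it.
		 */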
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

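		/* Set a flag so that we don't come here for each CPU. */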
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

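/**
 * cpc_ffh_supported() - check if FFH reading is supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */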
bool __weak cpc_ffh_supported(void)
{
	return false;
}

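/**
 * pcc_data_alloc() - Allocate the pcc_data memory for a PCC subspace
 * @pcc_ss_id: PCC subspace index.
 *
 * Check and allocate the cppc_pcc_data memory. In some processor
 * configurations the same subspace is shared between multiple CPUs,
 * especially on CPUs with hardware multi-threading support, so the
 * structure is refcounted rather than duplicated.
 *
 * Return: 0 for success, errno for failure
 */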
int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

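/* Check if CPPC revision + num_ent combination is supported */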
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}

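/*
 * The _CPC object is a package whose first two elements are NumEntries
 * and Revision, followed by the CPC register entries. Each register entry
 * is either a static integer or a buffer containing a Register()
 * descriptor (PCC, SystemMemory or FixedHW), which encodes how to access
 * that particular register.
 */

/**
 * acpi_cppc_processor_probe - Parse the ACPI _CPC table for this CPU.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */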
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

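	/* Evaluate this CPU's _CPC package. */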
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

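	/* First entry is NumEntries. */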
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->num_entries = num_ent;

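	/* Second entry should be revision. */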
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;

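	/* Iterate through the remaining entries in _CPC */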
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

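			/*
			 * The PCC Subspace index is encoded inside the CPC
			 * table entries. The same PCC index will be used for
			 * all the PCC entries, so extract it only once.
			 */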
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

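	/*
	 * Initialize the remaining cpc_regs as unsupported. For example, if
	 * the firmware exposes CPPC v2, the loop below marks the LOWEST_FREQ
	 * and NOMINAL_FREQ registers as unsupported.
	 */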
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

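	/* Store CPU Logical ID */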
	cpc_ptr->cpu_id = pr->id;

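	/* Parse PSD data for this CPU */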
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

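	/* Register the PCC channel once for each PCC subspace ID. */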
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

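	/* Everything looks okay */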
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

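	/* Add per logical CPU nodes for reading its feedback counters. */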
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

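	/* Plug PSD data into this CPU's CPC descriptor. */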
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
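	/* Free all the mapped sys mem areas for this CPU */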
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

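/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */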
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

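	/* Free all the mapped sys mem areas for this CPU */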
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

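/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */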
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

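/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */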
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

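/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they
 * should be as fast as possible. The PCC subspace is already mapped during
 * init, so it can be written to directly.
 */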
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

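/**
 * cppc_get_desired_perf - Get the value of the desired performance register.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: address of a variable to store the returned desired performance
 *
 * Return: 0 for success, -EIO otherwise.
 */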
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cpc_register_resource *desired_reg;
	struct cppc_pcc_data *pcc_ss_data = NULL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	if (CPC_IN_PCC(desired_reg)) {
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, desired_reg, desired_perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, desired_reg, desired_perf);

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

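/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */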
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

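	/* Are any of the regs PCC? */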
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

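/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */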
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

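	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value.
	 */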
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per the spec, if the ctr_wrap_time optional register is
	 * unsupported, the performance counters are assumed never to wrap
	 * during the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);

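/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */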
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

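	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel.
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously,
	 * that goal is achieved here.
	 */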
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will
		 * not arrive and steal the channel during the switch to the
		 * write lock.
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

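	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */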
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
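	/*
	 * This is Phase-II where we transfer the ownership of PCC to the
	 * platform.
	 *
	 * Short summary: of a group of cppc_set_perf requests that happened
	 * in a short overlapping interval, the last CPU to come out of
	 * Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * Requirements for Phase-II:
	 *     1. Execute Phase-II only when there are no CPUs currently
	 * executing in Phase-I.
	 *     2. Once Phase-II starts, prevent all other CPUs from entering
	 * Phase-I.
	 *     3. Only one CPU among all those that went through Phase-I
	 * runs Phase-II.
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following is true:
	 *     1. There is at least one CPU in Phase-I which will later
	 * execute write_trylock, so the CPUs in Phase-I will be responsible
	 * for executing Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute
	 * write_trylock and has already acquired the write lock. That cannot
	 * have happened before this CPU's Phase-I, as the read lock was held.
	 *     3. Some other CPU executing a PCC CMD_READ has stolen the read
	 * lock, in which case send_pcc_cmd will check for a pending
	 * pcc_write command, ensuring that the pending CMD_WRITE is also
	 * serviced.
	 */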
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

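/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for which transition latency is requested.
 *
 * ACPI CPPC does not specify the transition latency for performance change
 * requests. The closest we have is the timing information from the PCCT
 * tables, which provides the number and frequency of PCC commands the
 * platform can handle.
 */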
unsigned int cppc_get_transition_latency(int cpu_num)
{
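	/*
	 * The expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the
	 *               subspace channel can support, reported in commands
	 *               per minute. 0 indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after
	 *               the completion of a command before issuing the next
	 *               command, in microseconds.
	 */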
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);