This source file includes the following definitions:
- amdgpu_pll_reduce_ratio
- amdgpu_pll_get_fb_ref_div
- amdgpu_pll_compute
- amdgpu_pll_get_use_mask
- amdgpu_pll_get_shared_dp_ppll
- amdgpu_pll_get_shared_nondp_ppll

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: nominator
 * @den: denominator
 * @nom_min: minimum value for the nominator
 * @den_min: minimum value for the denominator
 *
 * Find the greatest common divisor and apply it to both nominator and
 * denominator, while making sure they stay at least as large as their
 * minimum values.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure the nominator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}
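
/*
 * Worked example (illustrative values, not taken from the driver): reducing
 * nom = 60000, den = 2700 with nom_min = 4 and den_min = 1 divides both by
 * gcd(60000, 2700) = 300:
 *
 *	unsigned nom = 60000, den = 2700;
 *
 *	amdgpu_pll_reduce_ratio(&nom, &den, 4, 1);
 *
 * afterwards nom == 200 and den == 9; both already satisfy their minimums,
 * so no scaling back up takes place.
 */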

/**
 * amdgpu_pll_get_fb_ref_div - feedback and reference divider calculation
 *
 * @nom: nominator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate the feedback and reference divider for a given post divider.
 * Makes sure we stay within the limits.
 */
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				      unsigned fb_div_max, unsigned ref_div_max,
				      unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = min(128 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}
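
/*
 * Worked example (illustrative values, not taken from the driver): with
 * nom = 200, den = 9, post_div = 2, fb_div_max = 1023 and ref_div_max = 128,
 * the reference divider limit is first clamped to min(128 / 2, 128) = 64.
 * Then ref_div = min(max(DIV_ROUND_CLOSEST(9, 2), 1), 64) = 5 and
 * fb_div = DIV_ROUND_CLOSEST(200 * 5 * 2, 9) = 222, so the resulting ratio
 * 222 / (5 * 2) = 22.2 approximates the requested 200 / 9 = 22.22.
 */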

/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void amdgpu_pll_compute(struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}
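
	/*
	 * Worked example for the bounds above (illustrative values, not taken
	 * from the driver): with vco_min = 600000, vco_max = 1200000 and
	 * target_clock = 270000, the integer division yields post_div_min = 2,
	 * but 2 * 270000 is below vco_min, so it is bumped to 3, while
	 * post_div_max becomes 4 because 4 * 270000 still fits below vco_max.
	 * Assuming min_post_div <= 3 and max_post_div >= 4, the search below
	 * then covers post dividers 3 and 4.
	 */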

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;

	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}
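
/*
 * Usage sketch (hypothetical caller, not part of this file): a display code
 * path that has already set up the PLL limits and flags might invoke the
 * computation roughly like this, then program the resulting dividers through
 * the atombios tables:
 *
 *	u32 dot_clock, fb_div, frac_fb_div, ref_div, post_div;
 *
 *	amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &dot_clock,
 *			   &fb_div, &frac_fb_div, &ref_div, &post_div);
 */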

/**
 * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
	}
	return pll_in_use;
}
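
/*
 * Illustrative sketch (hypothetical PLL-picking helper, not part of this
 * file): callers typically use the mask to find a PPLL that no other crtc
 * has claimed:
 *
 *	u32 pll_in_use = amdgpu_pll_get_use_mask(crtc);
 *
 *	if (!(pll_in_use & (1 << ATOM_PPLL2)))
 *		return ATOM_PPLL2;
 *	if (!(pll_in_use & (1 << ATOM_PPLL1)))
 *		return ATOM_PPLL1;
 *	return ATOM_PPLL_INVALID;
 */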

/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode. For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}
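
/*
 * Illustrative sketch (hypothetical caller, not part of this file): since all
 * DP crtcs/encoders can share one PPLL, a PLL-picking path typically tries to
 * reuse an existing assignment first:
 *
 *	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(
 *					amdgpu_crtc->encoder))) {
 *		pll = amdgpu_pll_get_shared_dp_ppll(crtc);
 *		if (pll != ATOM_PPLL_INVALID)
 *			return pll;
 *	}
 */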

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e. same clock).
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = amdgpu_crtc->adjusted_clock;

	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}
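
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a non-DP
 * crtc can first try to share a PPLL with a compatible crtc and only then
 * fall back to allocating a free one via the in-use mask:
 *
 *	pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
 *	if (pll != ATOM_PPLL_INVALID)
 *		return pll;
 *
 *	pll_in_use = amdgpu_pll_get_use_mask(crtc);
 *
 * and then pick the first PPLL whose bit is clear in pll_in_use.
 */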