
Linux/drivers/gpu/drm/i915/intel_display.c

  1 /*
  2  * Copyright © 2006-2007 Intel Corporation
  3  *
  4  * Permission is hereby granted, free of charge, to any person obtaining a
  5  * copy of this software and associated documentation files (the "Software"),
  6  * to deal in the Software without restriction, including without limitation
  7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8  * and/or sell copies of the Software, and to permit persons to whom the
  9  * Software is furnished to do so, subject to the following conditions:
 10  *
 11  * The above copyright notice and this permission notice (including the next
 12  * paragraph) shall be included in all copies or substantial portions of the
 13  * Software.
 14  *
 15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 21  * DEALINGS IN THE SOFTWARE.
 22  *
 23  * Authors:
 24  *      Eric Anholt <eric@anholt.net>
 25  */
 26 
 27 #include <linux/dmi.h>
 28 #include <linux/module.h>
 29 #include <linux/input.h>
 30 #include <linux/i2c.h>
 31 #include <linux/kernel.h>
 32 #include <linux/slab.h>
 33 #include <linux/vgaarb.h>
 34 #include <drm/drm_edid.h>
 35 #include <drm/drmP.h>
 36 #include "intel_drv.h"
 37 #include <drm/i915_drm.h>
 38 #include "i915_drv.h"
 39 #include "i915_trace.h"
 40 #include <drm/drm_dp_helper.h>
 41 #include <drm/drm_crtc_helper.h>
 42 #include <linux/dma_remapping.h>
 43 
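/*
 * Round-to-nearest division for 64-bit dividends, using do_div() so it is
 * safe on 32-bit builds.  For example, DIV_ROUND_CLOSEST_ULL(7, 2) == 4,
 * whereas plain integer division would give 3.
 */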
 44 #define DIV_ROUND_CLOSEST_ULL(ll, d)    \
 45         ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
 46 
 47 static void intel_increase_pllclock(struct drm_crtc *crtc);
 48 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 49 
 50 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 51                                 struct intel_crtc_config *pipe_config);
 52 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 53                                    struct intel_crtc_config *pipe_config);
 54 
 55 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
 56                           int x, int y, struct drm_framebuffer *old_fb);
 57 static int intel_framebuffer_init(struct drm_device *dev,
 58                                   struct intel_framebuffer *ifb,
 59                                   struct drm_mode_fb_cmd2 *mode_cmd,
 60                                   struct drm_i915_gem_object *obj);
 61 static void intel_dp_set_m_n(struct intel_crtc *crtc);
 62 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
 63 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
 64 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 65                                          struct intel_link_m_n *m_n);
 66 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
 67 static void haswell_set_pipeconf(struct drm_crtc *crtc);
 68 static void intel_set_pipe_csc(struct drm_crtc *crtc);
 69 static void vlv_prepare_pll(struct intel_crtc *crtc);
 70 
 71 typedef struct {
 72         int     min, max;
 73 } intel_range_t;
 74 
 75 typedef struct {
 76         int     dot_limit;
 77         int     p2_slow, p2_fast;
 78 } intel_p2_t;
 79 
 80 typedef struct intel_limit intel_limit_t;
 81 struct intel_limit {
 82         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
 83         intel_p2_t          p2;
 84 };
 85 
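/*
 * Read the PCH raw clock (rawclk) frequency from the PCH_RAWCLK_FREQ
 * register; only meaningful on PCH-split platforms, hence the WARN_ON below.
 */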
 86 int
 87 intel_pch_rawclk(struct drm_device *dev)
 88 {
 89         struct drm_i915_private *dev_priv = dev->dev_private;
 90 
 91         WARN_ON(!HAS_PCH_SPLIT(dev));
 92 
 93         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
 94 }
 95 
 96 static inline u32 /* units of 100MHz */
 97 intel_fdi_link_freq(struct drm_device *dev)
 98 {
 99         if (IS_GEN5(dev)) {
100                 struct drm_i915_private *dev_priv = dev->dev_private;
101                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
102         } else
103                 return 27;
104 }
105 
106 static const intel_limit_t intel_limits_i8xx_dac = {
107         .dot = { .min = 25000, .max = 350000 },
108         .vco = { .min = 908000, .max = 1512000 },
109         .n = { .min = 2, .max = 16 },
110         .m = { .min = 96, .max = 140 },
111         .m1 = { .min = 18, .max = 26 },
112         .m2 = { .min = 6, .max = 16 },
113         .p = { .min = 4, .max = 128 },
114         .p1 = { .min = 2, .max = 33 },
115         .p2 = { .dot_limit = 165000,
116                 .p2_slow = 4, .p2_fast = 2 },
117 };
118 
119 static const intel_limit_t intel_limits_i8xx_dvo = {
120         .dot = { .min = 25000, .max = 350000 },
121         .vco = { .min = 908000, .max = 1512000 },
122         .n = { .min = 2, .max = 16 },
123         .m = { .min = 96, .max = 140 },
124         .m1 = { .min = 18, .max = 26 },
125         .m2 = { .min = 6, .max = 16 },
126         .p = { .min = 4, .max = 128 },
127         .p1 = { .min = 2, .max = 33 },
128         .p2 = { .dot_limit = 165000,
129                 .p2_slow = 4, .p2_fast = 4 },
130 };
131 
132 static const intel_limit_t intel_limits_i8xx_lvds = {
133         .dot = { .min = 25000, .max = 350000 },
134         .vco = { .min = 908000, .max = 1512000 },
135         .n = { .min = 2, .max = 16 },
136         .m = { .min = 96, .max = 140 },
137         .m1 = { .min = 18, .max = 26 },
138         .m2 = { .min = 6, .max = 16 },
139         .p = { .min = 4, .max = 128 },
140         .p1 = { .min = 1, .max = 6 },
141         .p2 = { .dot_limit = 165000,
142                 .p2_slow = 14, .p2_fast = 7 },
143 };
144 
145 static const intel_limit_t intel_limits_i9xx_sdvo = {
146         .dot = { .min = 20000, .max = 400000 },
147         .vco = { .min = 1400000, .max = 2800000 },
148         .n = { .min = 1, .max = 6 },
149         .m = { .min = 70, .max = 120 },
150         .m1 = { .min = 8, .max = 18 },
151         .m2 = { .min = 3, .max = 7 },
152         .p = { .min = 5, .max = 80 },
153         .p1 = { .min = 1, .max = 8 },
154         .p2 = { .dot_limit = 200000,
155                 .p2_slow = 10, .p2_fast = 5 },
156 };
157 
158 static const intel_limit_t intel_limits_i9xx_lvds = {
159         .dot = { .min = 20000, .max = 400000 },
160         .vco = { .min = 1400000, .max = 2800000 },
161         .n = { .min = 1, .max = 6 },
162         .m = { .min = 70, .max = 120 },
163         .m1 = { .min = 8, .max = 18 },
164         .m2 = { .min = 3, .max = 7 },
165         .p = { .min = 7, .max = 98 },
166         .p1 = { .min = 1, .max = 8 },
167         .p2 = { .dot_limit = 112000,
168                 .p2_slow = 14, .p2_fast = 7 },
169 };
170 
171 
172 static const intel_limit_t intel_limits_g4x_sdvo = {
173         .dot = { .min = 25000, .max = 270000 },
174         .vco = { .min = 1750000, .max = 3500000},
175         .n = { .min = 1, .max = 4 },
176         .m = { .min = 104, .max = 138 },
177         .m1 = { .min = 17, .max = 23 },
178         .m2 = { .min = 5, .max = 11 },
179         .p = { .min = 10, .max = 30 },
180         .p1 = { .min = 1, .max = 3},
181         .p2 = { .dot_limit = 270000,
182                 .p2_slow = 10,
183                 .p2_fast = 10
184         },
185 };
186 
187 static const intel_limit_t intel_limits_g4x_hdmi = {
188         .dot = { .min = 22000, .max = 400000 },
189         .vco = { .min = 1750000, .max = 3500000},
190         .n = { .min = 1, .max = 4 },
191         .m = { .min = 104, .max = 138 },
192         .m1 = { .min = 16, .max = 23 },
193         .m2 = { .min = 5, .max = 11 },
194         .p = { .min = 5, .max = 80 },
195         .p1 = { .min = 1, .max = 8},
196         .p2 = { .dot_limit = 165000,
197                 .p2_slow = 10, .p2_fast = 5 },
198 };
199 
200 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
201         .dot = { .min = 20000, .max = 115000 },
202         .vco = { .min = 1750000, .max = 3500000 },
203         .n = { .min = 1, .max = 3 },
204         .m = { .min = 104, .max = 138 },
205         .m1 = { .min = 17, .max = 23 },
206         .m2 = { .min = 5, .max = 11 },
207         .p = { .min = 28, .max = 112 },
208         .p1 = { .min = 2, .max = 8 },
209         .p2 = { .dot_limit = 0,
210                 .p2_slow = 14, .p2_fast = 14
211         },
212 };
213 
214 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
215         .dot = { .min = 80000, .max = 224000 },
216         .vco = { .min = 1750000, .max = 3500000 },
217         .n = { .min = 1, .max = 3 },
218         .m = { .min = 104, .max = 138 },
219         .m1 = { .min = 17, .max = 23 },
220         .m2 = { .min = 5, .max = 11 },
221         .p = { .min = 14, .max = 42 },
222         .p1 = { .min = 2, .max = 6 },
223         .p2 = { .dot_limit = 0,
224                 .p2_slow = 7, .p2_fast = 7
225         },
226 };
227 
228 static const intel_limit_t intel_limits_pineview_sdvo = {
229         .dot = { .min = 20000, .max = 400000},
230         .vco = { .min = 1700000, .max = 3500000 },
 231         /* Pineview's N counter is a ring counter */
232         .n = { .min = 3, .max = 6 },
233         .m = { .min = 2, .max = 256 },
234         /* Pineview only has one combined m divider, which we treat as m2. */
235         .m1 = { .min = 0, .max = 0 },
236         .m2 = { .min = 0, .max = 254 },
237         .p = { .min = 5, .max = 80 },
238         .p1 = { .min = 1, .max = 8 },
239         .p2 = { .dot_limit = 200000,
240                 .p2_slow = 10, .p2_fast = 5 },
241 };
242 
243 static const intel_limit_t intel_limits_pineview_lvds = {
244         .dot = { .min = 20000, .max = 400000 },
245         .vco = { .min = 1700000, .max = 3500000 },
246         .n = { .min = 3, .max = 6 },
247         .m = { .min = 2, .max = 256 },
248         .m1 = { .min = 0, .max = 0 },
249         .m2 = { .min = 0, .max = 254 },
250         .p = { .min = 7, .max = 112 },
251         .p1 = { .min = 1, .max = 8 },
252         .p2 = { .dot_limit = 112000,
253                 .p2_slow = 14, .p2_fast = 14 },
254 };
255 
256 /* Ironlake / Sandybridge
257  *
258  * We calculate clock using (register_value + 2) for N/M1/M2, so here
259  * the range value for them is (actual_value - 2).
260  */
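/* For example, .n = { .min = 1, .max = 5 } below corresponds to actual N values of 3..7. */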
261 static const intel_limit_t intel_limits_ironlake_dac = {
262         .dot = { .min = 25000, .max = 350000 },
263         .vco = { .min = 1760000, .max = 3510000 },
264         .n = { .min = 1, .max = 5 },
265         .m = { .min = 79, .max = 127 },
266         .m1 = { .min = 12, .max = 22 },
267         .m2 = { .min = 5, .max = 9 },
268         .p = { .min = 5, .max = 80 },
269         .p1 = { .min = 1, .max = 8 },
270         .p2 = { .dot_limit = 225000,
271                 .p2_slow = 10, .p2_fast = 5 },
272 };
273 
274 static const intel_limit_t intel_limits_ironlake_single_lvds = {
275         .dot = { .min = 25000, .max = 350000 },
276         .vco = { .min = 1760000, .max = 3510000 },
277         .n = { .min = 1, .max = 3 },
278         .m = { .min = 79, .max = 118 },
279         .m1 = { .min = 12, .max = 22 },
280         .m2 = { .min = 5, .max = 9 },
281         .p = { .min = 28, .max = 112 },
282         .p1 = { .min = 2, .max = 8 },
283         .p2 = { .dot_limit = 225000,
284                 .p2_slow = 14, .p2_fast = 14 },
285 };
286 
287 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
288         .dot = { .min = 25000, .max = 350000 },
289         .vco = { .min = 1760000, .max = 3510000 },
290         .n = { .min = 1, .max = 3 },
291         .m = { .min = 79, .max = 127 },
292         .m1 = { .min = 12, .max = 22 },
293         .m2 = { .min = 5, .max = 9 },
294         .p = { .min = 14, .max = 56 },
295         .p1 = { .min = 2, .max = 8 },
296         .p2 = { .dot_limit = 225000,
297                 .p2_slow = 7, .p2_fast = 7 },
298 };
299 
 300 /* LVDS 100MHz refclk limits. */
301 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
302         .dot = { .min = 25000, .max = 350000 },
303         .vco = { .min = 1760000, .max = 3510000 },
304         .n = { .min = 1, .max = 2 },
305         .m = { .min = 79, .max = 126 },
306         .m1 = { .min = 12, .max = 22 },
307         .m2 = { .min = 5, .max = 9 },
308         .p = { .min = 28, .max = 112 },
309         .p1 = { .min = 2, .max = 8 },
310         .p2 = { .dot_limit = 225000,
311                 .p2_slow = 14, .p2_fast = 14 },
312 };
313 
314 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
315         .dot = { .min = 25000, .max = 350000 },
316         .vco = { .min = 1760000, .max = 3510000 },
317         .n = { .min = 1, .max = 3 },
318         .m = { .min = 79, .max = 126 },
319         .m1 = { .min = 12, .max = 22 },
320         .m2 = { .min = 5, .max = 9 },
321         .p = { .min = 14, .max = 42 },
322         .p1 = { .min = 2, .max = 6 },
323         .p2 = { .dot_limit = 225000,
324                 .p2_slow = 7, .p2_fast = 7 },
325 };
326 
327 static const intel_limit_t intel_limits_vlv = {
328          /*
329           * These are the data rate limits (measured in fast clocks)
330           * since those are the strictest limits we have. The fast
331           * clock and actual rate limits are more relaxed, so checking
332           * them would make no difference.
333           */
334         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
335         .vco = { .min = 4000000, .max = 6000000 },
336         .n = { .min = 1, .max = 7 },
337         .m1 = { .min = 2, .max = 3 },
338         .m2 = { .min = 11, .max = 156 },
339         .p1 = { .min = 2, .max = 3 },
340         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
341 };
342 
343 static const intel_limit_t intel_limits_chv = {
344         /*
345          * These are the data rate limits (measured in fast clocks)
346          * since those are the strictest limits we have.  The fast
347          * clock and actual rate limits are more relaxed, so checking
348          * them would make no difference.
349          */
350         .dot = { .min = 25000 * 5, .max = 540000 * 5},
351         .vco = { .min = 4860000, .max = 6700000 },
352         .n = { .min = 1, .max = 1 },
353         .m1 = { .min = 2, .max = 2 },
354         .m2 = { .min = 24 << 22, .max = 175 << 22 },
355         .p1 = { .min = 2, .max = 4 },
356         .p2 = { .p2_slow = 1, .p2_fast = 14 },
357 };
358 
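/*
 * Fill in the derived fields of a VLV clock: m = m1 * m2, p = p1 * p2,
 * vco = refclk * m / n and dot = vco / p, using round-to-closest division.
 */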
359 static void vlv_clock(int refclk, intel_clock_t *clock)
360 {
361         clock->m = clock->m1 * clock->m2;
362         clock->p = clock->p1 * clock->p2;
363         if (WARN_ON(clock->n == 0 || clock->p == 0))
364                 return;
365         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
366         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
367 }
368 
369 /**
370  * Returns whether any output on the specified pipe is of the specified type
371  */
372 static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
373 {
374         struct drm_device *dev = crtc->dev;
375         struct intel_encoder *encoder;
376 
377         for_each_encoder_on_crtc(dev, crtc, encoder)
378                 if (encoder->type == type)
379                         return true;
380 
381         return false;
382 }
383 
384 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
385                                                 int refclk)
386 {
387         struct drm_device *dev = crtc->dev;
388         const intel_limit_t *limit;
389 
390         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
391                 if (intel_is_dual_link_lvds(dev)) {
392                         if (refclk == 100000)
393                                 limit = &intel_limits_ironlake_dual_lvds_100m;
394                         else
395                                 limit = &intel_limits_ironlake_dual_lvds;
396                 } else {
397                         if (refclk == 100000)
398                                 limit = &intel_limits_ironlake_single_lvds_100m;
399                         else
400                                 limit = &intel_limits_ironlake_single_lvds;
401                 }
402         } else
403                 limit = &intel_limits_ironlake_dac;
404 
405         return limit;
406 }
407 
408 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
409 {
410         struct drm_device *dev = crtc->dev;
411         const intel_limit_t *limit;
412 
413         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
414                 if (intel_is_dual_link_lvds(dev))
415                         limit = &intel_limits_g4x_dual_channel_lvds;
416                 else
417                         limit = &intel_limits_g4x_single_channel_lvds;
418         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
419                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
420                 limit = &intel_limits_g4x_hdmi;
421         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
422                 limit = &intel_limits_g4x_sdvo;
423         } else /* The option is for other outputs */
424                 limit = &intel_limits_i9xx_sdvo;
425 
426         return limit;
427 }
428 
429 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
430 {
431         struct drm_device *dev = crtc->dev;
432         const intel_limit_t *limit;
433 
434         if (HAS_PCH_SPLIT(dev))
435                 limit = intel_ironlake_limit(crtc, refclk);
436         else if (IS_G4X(dev)) {
437                 limit = intel_g4x_limit(crtc);
438         } else if (IS_PINEVIEW(dev)) {
439                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
440                         limit = &intel_limits_pineview_lvds;
441                 else
442                         limit = &intel_limits_pineview_sdvo;
443         } else if (IS_CHERRYVIEW(dev)) {
444                 limit = &intel_limits_chv;
445         } else if (IS_VALLEYVIEW(dev)) {
446                 limit = &intel_limits_vlv;
447         } else if (!IS_GEN2(dev)) {
448                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
449                         limit = &intel_limits_i9xx_lvds;
450                 else
451                         limit = &intel_limits_i9xx_sdvo;
452         } else {
453                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
454                         limit = &intel_limits_i8xx_lvds;
455                 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
456                         limit = &intel_limits_i8xx_dvo;
457                 else
458                         limit = &intel_limits_i8xx_dac;
459         }
460         return limit;
461 }
462 
463 /* m1 is reserved as 0 in Pineview, n is a ring counter */
464 static void pineview_clock(int refclk, intel_clock_t *clock)
465 {
466         clock->m = clock->m2 + 2;
467         clock->p = clock->p1 * clock->p2;
468         if (WARN_ON(clock->n == 0 || clock->p == 0))
469                 return;
470         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
471         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
472 }
473 
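/*
 * Effective m divider for i9xx-style DPLLs: the register fields are offset
 * by 2 and m1 is weighted by 5, i.e. m = 5 * (m1 + 2) + (m2 + 2).
 * i9xx_clock() below likewise uses n + 2 as the actual N divider.
 */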
474 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
475 {
476         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
477 }
478 
479 static void i9xx_clock(int refclk, intel_clock_t *clock)
480 {
481         clock->m = i9xx_dpll_compute_m(clock);
482         clock->p = clock->p1 * clock->p2;
483         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
484                 return;
485         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
486         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
487 }
488 
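/*
 * CHV stores m2 as a fixed-point value with 22 fractional bits (note the
 * << 22 in intel_limits_chv), hence the vco is computed with an n << 22
 * divisor here.
 */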
489 static void chv_clock(int refclk, intel_clock_t *clock)
490 {
491         clock->m = clock->m1 * clock->m2;
492         clock->p = clock->p1 * clock->p2;
493         if (WARN_ON(clock->n == 0 || clock->p == 0))
494                 return;
495         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
496                         clock->n << 22);
497         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
498 }
499 
500 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
501 /**
502  * Returns whether the given set of divisors are valid for a given refclk with
503  * the given connectors.
504  */
505 
506 static bool intel_PLL_is_valid(struct drm_device *dev,
507                                const intel_limit_t *limit,
508                                const intel_clock_t *clock)
509 {
510         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
511                 INTELPllInvalid("n out of range\n");
512         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
513                 INTELPllInvalid("p1 out of range\n");
514         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
515                 INTELPllInvalid("m2 out of range\n");
516         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
517                 INTELPllInvalid("m1 out of range\n");
518 
519         if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
520                 if (clock->m1 <= clock->m2)
521                         INTELPllInvalid("m1 <= m2\n");
522 
523         if (!IS_VALLEYVIEW(dev)) {
524                 if (clock->p < limit->p.min || limit->p.max < clock->p)
525                         INTELPllInvalid("p out of range\n");
526                 if (clock->m < limit->m.min || limit->m.max < clock->m)
527                         INTELPllInvalid("m out of range\n");
528         }
529 
530         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
531                 INTELPllInvalid("vco out of range\n");
532         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
533          * connector, etc., rather than just a single range.
534          */
535         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
536                 INTELPllInvalid("dot out of range\n");
537 
538         return true;
539 }
540 
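/*
 * Brute-force DPLL divider search for i9xx: pick p2 from the LVDS link
 * configuration or the dot clock limit, then walk the m1/m2/n/p1 ranges and
 * keep the candidate whose dot clock is closest to the target.
 */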
541 static bool
542 i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
543                     int target, int refclk, intel_clock_t *match_clock,
544                     intel_clock_t *best_clock)
545 {
546         struct drm_device *dev = crtc->dev;
547         intel_clock_t clock;
548         int err = target;
549 
550         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
551                 /*
552                  * For LVDS just rely on its current settings for dual-channel.
553                  * We haven't figured out how to reliably set up different
554                  * single/dual channel state, if we even can.
555                  */
556                 if (intel_is_dual_link_lvds(dev))
557                         clock.p2 = limit->p2.p2_fast;
558                 else
559                         clock.p2 = limit->p2.p2_slow;
560         } else {
561                 if (target < limit->p2.dot_limit)
562                         clock.p2 = limit->p2.p2_slow;
563                 else
564                         clock.p2 = limit->p2.p2_fast;
565         }
566 
567         memset(best_clock, 0, sizeof(*best_clock));
568 
569         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
570              clock.m1++) {
571                 for (clock.m2 = limit->m2.min;
572                      clock.m2 <= limit->m2.max; clock.m2++) {
573                         if (clock.m2 >= clock.m1)
574                                 break;
575                         for (clock.n = limit->n.min;
576                              clock.n <= limit->n.max; clock.n++) {
577                                 for (clock.p1 = limit->p1.min;
578                                         clock.p1 <= limit->p1.max; clock.p1++) {
579                                         int this_err;
580 
581                                         i9xx_clock(refclk, &clock);
582                                         if (!intel_PLL_is_valid(dev, limit,
583                                                                 &clock))
584                                                 continue;
585                                         if (match_clock &&
586                                             clock.p != match_clock->p)
587                                                 continue;
588 
589                                         this_err = abs(clock.dot - target);
590                                         if (this_err < err) {
591                                                 *best_clock = clock;
592                                                 err = this_err;
593                                         }
594                                 }
595                         }
596                 }
597         }
598 
599         return (err != target);
600 }
601 
602 static bool
603 pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
604                    int target, int refclk, intel_clock_t *match_clock,
605                    intel_clock_t *best_clock)
606 {
607         struct drm_device *dev = crtc->dev;
608         intel_clock_t clock;
609         int err = target;
610 
611         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
612                 /*
613                  * For LVDS just rely on its current settings for dual-channel.
614                  * We haven't figured out how to reliably set up different
615                  * single/dual channel state, if we even can.
616                  */
617                 if (intel_is_dual_link_lvds(dev))
618                         clock.p2 = limit->p2.p2_fast;
619                 else
620                         clock.p2 = limit->p2.p2_slow;
621         } else {
622                 if (target < limit->p2.dot_limit)
623                         clock.p2 = limit->p2.p2_slow;
624                 else
625                         clock.p2 = limit->p2.p2_fast;
626         }
627 
628         memset(best_clock, 0, sizeof(*best_clock));
629 
630         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
631              clock.m1++) {
632                 for (clock.m2 = limit->m2.min;
633                      clock.m2 <= limit->m2.max; clock.m2++) {
634                         for (clock.n = limit->n.min;
635                              clock.n <= limit->n.max; clock.n++) {
636                                 for (clock.p1 = limit->p1.min;
637                                         clock.p1 <= limit->p1.max; clock.p1++) {
638                                         int this_err;
639 
640                                         pineview_clock(refclk, &clock);
641                                         if (!intel_PLL_is_valid(dev, limit,
642                                                                 &clock))
643                                                 continue;
644                                         if (match_clock &&
645                                             clock.p != match_clock->p)
646                                                 continue;
647 
648                                         this_err = abs(clock.dot - target);
649                                         if (this_err < err) {
650                                                 *best_clock = clock;
651                                                 err = this_err;
652                                         }
653                                 }
654                         }
655                 }
656         }
657 
658         return (err != target);
659 }
660 
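/*
 * G4X variant of the divider search: any candidate within err_most of the
 * target is acceptable, with smaller n and larger m1/m2/p1 preferred.
 */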
661 static bool
662 g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
663                    int target, int refclk, intel_clock_t *match_clock,
664                    intel_clock_t *best_clock)
665 {
666         struct drm_device *dev = crtc->dev;
667         intel_clock_t clock;
668         int max_n;
669         bool found;
670         /* approximately equals target * 0.00585 */
671         int err_most = (target >> 8) + (target >> 9);
672         found = false;
673 
674         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
675                 if (intel_is_dual_link_lvds(dev))
676                         clock.p2 = limit->p2.p2_fast;
677                 else
678                         clock.p2 = limit->p2.p2_slow;
679         } else {
680                 if (target < limit->p2.dot_limit)
681                         clock.p2 = limit->p2.p2_slow;
682                 else
683                         clock.p2 = limit->p2.p2_fast;
684         }
685 
686         memset(best_clock, 0, sizeof(*best_clock));
687         max_n = limit->n.max;
688         /* based on hardware requirement, prefer smaller n to precision */
689         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
 690                 /* based on hardware requirement, prefer larger m1,m2 */
691                 for (clock.m1 = limit->m1.max;
692                      clock.m1 >= limit->m1.min; clock.m1--) {
693                         for (clock.m2 = limit->m2.max;
694                              clock.m2 >= limit->m2.min; clock.m2--) {
695                                 for (clock.p1 = limit->p1.max;
696                                      clock.p1 >= limit->p1.min; clock.p1--) {
697                                         int this_err;
698 
699                                         i9xx_clock(refclk, &clock);
700                                         if (!intel_PLL_is_valid(dev, limit,
701                                                                 &clock))
702                                                 continue;
703 
704                                         this_err = abs(clock.dot - target);
705                                         if (this_err < err_most) {
706                                                 *best_clock = clock;
707                                                 err_most = this_err;
708                                                 max_n = clock.n;
709                                                 found = true;
710                                         }
711                                 }
712                         }
713                 }
714         }
715         return found;
716 }
717 
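/*
 * VLV divider search: works in units of the fast (5x) clock, solves m2
 * directly for each n/p1/p2/m1 combination and scores candidates by their
 * ppm error against the target, preferring larger p below 100 ppm.
 */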
718 static bool
719 vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
720                    int target, int refclk, intel_clock_t *match_clock,
721                    intel_clock_t *best_clock)
722 {
723         struct drm_device *dev = crtc->dev;
724         intel_clock_t clock;
725         unsigned int bestppm = 1000000;
726         /* min update 19.2 MHz */
727         int max_n = min(limit->n.max, refclk / 19200);
728         bool found = false;
729 
730         target *= 5; /* fast clock */
731 
732         memset(best_clock, 0, sizeof(*best_clock));
733 
734         /* based on hardware requirement, prefer smaller n to precision */
735         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
736                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
737                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
738                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
739                                 clock.p = clock.p1 * clock.p2;
740                                 /* based on hardware requirement, prefer bigger m1,m2 values */
741                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
742                                         unsigned int ppm, diff;
743 
744                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
745                                                                      refclk * clock.m1);
746 
747                                         vlv_clock(refclk, &clock);
748 
749                                         if (!intel_PLL_is_valid(dev, limit,
750                                                                 &clock))
751                                                 continue;
752 
753                                         diff = abs(clock.dot - target);
754                                         ppm = div_u64(1000000ULL * diff, target);
755 
756                                         if (ppm < 100 && clock.p > best_clock->p) {
757                                                 bestppm = 0;
758                                                 *best_clock = clock;
759                                                 found = true;
760                                         }
761 
762                                         if (bestppm >= 10 && ppm < bestppm - 10) {
763                                                 bestppm = ppm;
764                                                 *best_clock = clock;
765                                                 found = true;
766                                         }
767                                 }
768                         }
769                 }
770         }
771 
772         return found;
773 }
774 
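/*
 * CHV divider search: n and m1 are fixed per the hardware doc, so m2 is
 * solved directly for each p1/p2 candidate and the largest usable p wins.
 */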
775 static bool
776 chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
777                    int target, int refclk, intel_clock_t *match_clock,
778                    intel_clock_t *best_clock)
779 {
780         struct drm_device *dev = crtc->dev;
781         intel_clock_t clock;
782         uint64_t m2;
 783         bool found = false;
784 
785         memset(best_clock, 0, sizeof(*best_clock));
786 
 787         /*
 788          * Based on the hardware doc, n is always set to 1 and m1 is always
 789          * set to 2.  If we ever need to support a 200MHz refclk, this will
 790          * need revisiting because n may no longer be 1.
 791          */
792         clock.n = 1, clock.m1 = 2;
793         target *= 5;    /* fast clock */
794 
795         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
796                 for (clock.p2 = limit->p2.p2_fast;
797                                 clock.p2 >= limit->p2.p2_slow;
798                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
799 
800                         clock.p = clock.p1 * clock.p2;
801 
802                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
803                                         clock.n) << 22, refclk * clock.m1);
804 
805                         if (m2 > INT_MAX/clock.m1)
806                                 continue;
807 
808                         clock.m2 = m2;
809 
810                         chv_clock(refclk, &clock);
811 
812                         if (!intel_PLL_is_valid(dev, limit, &clock))
813                                 continue;
814 
815                         /* based on hardware requirement, prefer bigger p
816                          */
817                         if (clock.p > best_clock->p) {
818                                 *best_clock = clock;
819                                 found = true;
820                         }
821                 }
822         }
823 
824         return found;
825 }
826 
827 bool intel_crtc_active(struct drm_crtc *crtc)
828 {
829         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
830 
831         /* Be paranoid as we can arrive here with only partial
832          * state retrieved from the hardware during setup.
833          *
834          * We can ditch the adjusted_mode.crtc_clock check as soon
835          * as Haswell has gained clock readout/fastboot support.
836          *
837          * We can ditch the crtc->primary->fb check as soon as we can
838          * properly reconstruct framebuffers.
839          */
840         return intel_crtc->active && crtc->primary->fb &&
841                 intel_crtc->config.adjusted_mode.crtc_clock;
842 }
843 
844 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
845                                              enum pipe pipe)
846 {
847         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
848         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
849 
850         return intel_crtc->config.cpu_transcoder;
851 }
852 
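/*
 * Wait for vblank by polling the hardware frame counter (PIPE_FRMCOUNT_GM45)
 * until it advances; used on G4X and gen5+ instead of the PIPESTAT vblank bit.
 */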
853 static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
854 {
855         struct drm_i915_private *dev_priv = dev->dev_private;
856         u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
857 
858         frame = I915_READ(frame_reg);
859 
860         if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
861                 WARN(1, "vblank wait timed out\n");
862 }
863 
864 /**
865  * intel_wait_for_vblank - wait for vblank on a given pipe
866  * @dev: drm device
867  * @pipe: pipe to wait for
868  *
869  * Wait for vblank to occur on a given pipe.  Needed for various bits of
870  * mode setting code.
871  */
872 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
873 {
874         struct drm_i915_private *dev_priv = dev->dev_private;
875         int pipestat_reg = PIPESTAT(pipe);
876 
877         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
878                 g4x_wait_for_vblank(dev, pipe);
879                 return;
880         }
881 
882         /* Clear existing vblank status. Note this will clear any other
883          * sticky status fields as well.
884          *
885          * This races with i915_driver_irq_handler() with the result
886          * that either function could miss a vblank event.  Here it is not
887          * fatal, as we will either wait upon the next vblank interrupt or
888          * timeout.  Generally speaking intel_wait_for_vblank() is only
889          * called during modeset at which time the GPU should be idle and
890          * should *not* be performing page flips and thus not waiting on
891          * vblanks...
892          * Currently, the result of us stealing a vblank from the irq
893          * handler is that a single frame will be skipped during swapbuffers.
894          */
895         I915_WRITE(pipestat_reg,
896                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
897 
898         /* Wait for vblank interrupt bit to set */
899         if (wait_for(I915_READ(pipestat_reg) &
900                      PIPE_VBLANK_INTERRUPT_STATUS,
901                      50))
902                 DRM_DEBUG_KMS("vblank wait timed out\n");
903 }
904 
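/*
 * Sample the pipe's scanline counter (PIPEDSL) twice, 5 ms apart; if it
 * hasn't moved, the pipe has stopped scanning out.
 */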
905 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
906 {
907         struct drm_i915_private *dev_priv = dev->dev_private;
908         u32 reg = PIPEDSL(pipe);
909         u32 line1, line2;
910         u32 line_mask;
911 
912         if (IS_GEN2(dev))
913                 line_mask = DSL_LINEMASK_GEN2;
914         else
915                 line_mask = DSL_LINEMASK_GEN3;
916 
917         line1 = I915_READ(reg) & line_mask;
918         mdelay(5);
919         line2 = I915_READ(reg) & line_mask;
920 
921         return line1 == line2;
922 }
923 
924 /*
925  * intel_wait_for_pipe_off - wait for pipe to turn off
926  * @dev: drm device
927  * @pipe: pipe to wait for
928  *
929  * After disabling a pipe, we can't wait for vblank in the usual way,
930  * spinning on the vblank interrupt status bit, since we won't actually
931  * see an interrupt when the pipe is disabled.
932  *
933  * On Gen4 and above:
934  *   wait for the pipe register state bit to turn off
935  *
936  * Otherwise:
937  *   wait for the display line value to settle (it usually
938  *   ends up stopping at the start of the next frame).
939  *
940  */
941 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
942 {
943         struct drm_i915_private *dev_priv = dev->dev_private;
944         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
945                                                                       pipe);
946 
947         if (INTEL_INFO(dev)->gen >= 4) {
948                 int reg = PIPECONF(cpu_transcoder);
949 
950                 /* Wait for the Pipe State to go off */
951                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
952                              100))
953                         WARN(1, "pipe_off wait timed out\n");
954         } else {
955                 /* Wait for the display line to settle */
956                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
957                         WARN(1, "pipe_off wait timed out\n");
958         }
959 }
960 
961 /*
962  * ibx_digital_port_connected - is the specified port connected?
963  * @dev_priv: i915 private structure
964  * @port: the port to test
965  *
966  * Returns true if @port is connected, false otherwise.
967  */
968 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
969                                 struct intel_digital_port *port)
970 {
971         u32 bit;
972 
973         if (HAS_PCH_IBX(dev_priv->dev)) {
974                 switch (port->port) {
975                 case PORT_B:
976                         bit = SDE_PORTB_HOTPLUG;
977                         break;
978                 case PORT_C:
979                         bit = SDE_PORTC_HOTPLUG;
980                         break;
981                 case PORT_D:
982                         bit = SDE_PORTD_HOTPLUG;
983                         break;
984                 default:
985                         return true;
986                 }
987         } else {
988                 switch (port->port) {
989                 case PORT_B:
990                         bit = SDE_PORTB_HOTPLUG_CPT;
991                         break;
992                 case PORT_C:
993                         bit = SDE_PORTC_HOTPLUG_CPT;
994                         break;
995                 case PORT_D:
996                         bit = SDE_PORTD_HOTPLUG_CPT;
997                         break;
998                 default:
999                         return true;
1000                 }
1001         }
1002 
1003         return I915_READ(SDEISR) & bit;
1004 }
1005 
1006 static const char *state_string(bool enabled)
1007 {
1008         return enabled ? "on" : "off";
1009 }
1010 
1011 /* Only for pre-ILK configs */
1012 void assert_pll(struct drm_i915_private *dev_priv,
1013                 enum pipe pipe, bool state)
1014 {
1015         int reg;
1016         u32 val;
1017         bool cur_state;
1018 
1019         reg = DPLL(pipe);
1020         val = I915_READ(reg);
1021         cur_state = !!(val & DPLL_VCO_ENABLE);
1022         WARN(cur_state != state,
1023              "PLL state assertion failure (expected %s, current %s)\n",
1024              state_string(state), state_string(cur_state));
1025 }
1026 
1027 /* XXX: the dsi pll is shared between MIPI DSI ports */
1028 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1029 {
1030         u32 val;
1031         bool cur_state;
1032 
1033         mutex_lock(&dev_priv->dpio_lock);
1034         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1035         mutex_unlock(&dev_priv->dpio_lock);
1036 
1037         cur_state = val & DSI_PLL_VCO_EN;
1038         WARN(cur_state != state,
1039              "DSI PLL state assertion failure (expected %s, current %s)\n",
1040              state_string(state), state_string(cur_state));
1041 }
1042 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1043 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1044 
1045 struct intel_shared_dpll *
1046 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1047 {
1048         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1049 
1050         if (crtc->config.shared_dpll < 0)
1051                 return NULL;
1052 
1053         return &dev_priv->shared_dplls[crtc->config.shared_dpll];
1054 }
1055 
1056 /* For ILK+ */
1057 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1058                         struct intel_shared_dpll *pll,
1059                         bool state)
1060 {
1061         bool cur_state;
1062         struct intel_dpll_hw_state hw_state;
1063 
1064         if (HAS_PCH_LPT(dev_priv->dev)) {
1065                 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1066                 return;
1067         }
1068 
1069         if (WARN(!pll,
1070                   "asserting DPLL %s with no DPLL\n", state_string(state)))
1071                 return;
1072 
1073         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1074         WARN(cur_state != state,
1075              "%s assertion failure (expected %s, current %s)\n",
1076              pll->name, state_string(state), state_string(cur_state));
1077 }
1078 
1079 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1080                           enum pipe pipe, bool state)
1081 {
1082         int reg;
1083         u32 val;
1084         bool cur_state;
1085         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1086                                                                       pipe);
1087 
1088         if (HAS_DDI(dev_priv->dev)) {
1089                 /* DDI does not have a specific FDI_TX register */
1090                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1091                 val = I915_READ(reg);
1092                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1093         } else {
1094                 reg = FDI_TX_CTL(pipe);
1095                 val = I915_READ(reg);
1096                 cur_state = !!(val & FDI_TX_ENABLE);
1097         }
1098         WARN(cur_state != state,
1099              "FDI TX state assertion failure (expected %s, current %s)\n",
1100              state_string(state), state_string(cur_state));
1101 }
1102 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1103 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1104 
1105 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1106                           enum pipe pipe, bool state)
1107 {
1108         int reg;
1109         u32 val;
1110         bool cur_state;
1111 
1112         reg = FDI_RX_CTL(pipe);
1113         val = I915_READ(reg);
1114         cur_state = !!(val & FDI_RX_ENABLE);
1115         WARN(cur_state != state,
1116              "FDI RX state assertion failure (expected %s, current %s)\n",
1117              state_string(state), state_string(cur_state));
1118 }
1119 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1120 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1121 
1122 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1123                                       enum pipe pipe)
1124 {
1125         int reg;
1126         u32 val;
1127 
1128         /* ILK FDI PLL is always enabled */
1129         if (INTEL_INFO(dev_priv->dev)->gen == 5)
1130                 return;
1131 
1132         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1133         if (HAS_DDI(dev_priv->dev))
1134                 return;
1135 
1136         reg = FDI_TX_CTL(pipe);
1137         val = I915_READ(reg);
1138         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1139 }
1140 
1141 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1142                        enum pipe pipe, bool state)
1143 {
1144         int reg;
1145         u32 val;
1146         bool cur_state;
1147 
1148         reg = FDI_RX_CTL(pipe);
1149         val = I915_READ(reg);
1150         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1151         WARN(cur_state != state,
1152              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1153              state_string(state), state_string(cur_state));
1154 }
1155 
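/*
 * Warn if the LVDS panel on @pipe is powered on while its power sequencer
 * registers are still locked (write-protected).
 */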
1156 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1157                                   enum pipe pipe)
1158 {
1159         int pp_reg, lvds_reg;
1160         u32 val;
1161         enum pipe panel_pipe = PIPE_A;
1162         bool locked = true;
1163 
1164         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1165                 pp_reg = PCH_PP_CONTROL;
1166                 lvds_reg = PCH_LVDS;
1167         } else {
1168                 pp_reg = PP_CONTROL;
1169                 lvds_reg = LVDS;
1170         }
1171 
1172         val = I915_READ(pp_reg);
1173         if (!(val & PANEL_POWER_ON) ||
1174             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1175                 locked = false;
1176 
1177         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1178                 panel_pipe = PIPE_B;
1179 
1180         WARN(panel_pipe == pipe && locked,
1181              "panel assertion failure, pipe %c regs locked\n",
1182              pipe_name(pipe));
1183 }
1184 
1185 static void assert_cursor(struct drm_i915_private *dev_priv,
1186                           enum pipe pipe, bool state)
1187 {
1188         struct drm_device *dev = dev_priv->dev;
1189         bool cur_state;
1190 
1191         if (IS_845G(dev) || IS_I865G(dev))
1192                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1193         else
1194                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1195 
1196         WARN(cur_state != state,
1197              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1198              pipe_name(pipe), state_string(state), state_string(cur_state));
1199 }
1200 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1201 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1202 
1203 void assert_pipe(struct drm_i915_private *dev_priv,
1204                  enum pipe pipe, bool state)
1205 {
1206         int reg;
1207         u32 val;
1208         bool cur_state;
1209         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1210                                                                       pipe);
1211 
1212         /* if we need the pipe A quirk it must be always on */
1213         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1214                 state = true;
1215 
1216         if (!intel_display_power_enabled(dev_priv,
1217                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1218                 cur_state = false;
1219         } else {
1220                 reg = PIPECONF(cpu_transcoder);
1221                 val = I915_READ(reg);
1222                 cur_state = !!(val & PIPECONF_ENABLE);
1223         }
1224 
1225         WARN(cur_state != state,
1226              "pipe %c assertion failure (expected %s, current %s)\n",
1227              pipe_name(pipe), state_string(state), state_string(cur_state));
1228 }
1229 
1230 static void assert_plane(struct drm_i915_private *dev_priv,
1231                          enum plane plane, bool state)
1232 {
1233         int reg;
1234         u32 val;
1235         bool cur_state;
1236 
1237         reg = DSPCNTR(plane);
1238         val = I915_READ(reg);
1239         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1240         WARN(cur_state != state,
1241              "plane %c assertion failure (expected %s, current %s)\n",
1242              plane_name(plane), state_string(state), state_string(cur_state));
1243 }
1244 
1245 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1246 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1247 
1248 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1249                                    enum pipe pipe)
1250 {
1251         struct drm_device *dev = dev_priv->dev;
1252         int reg, i;
1253         u32 val;
1254         int cur_pipe;
1255 
1256         /* Primary planes are fixed to pipes on gen4+ */
1257         if (INTEL_INFO(dev)->gen >= 4) {
1258                 reg = DSPCNTR(pipe);
1259                 val = I915_READ(reg);
1260                 WARN(val & DISPLAY_PLANE_ENABLE,
1261                      "plane %c assertion failure, should be disabled but is not\n",
1262                      plane_name(pipe));
1263                 return;
1264         }
1265 
1266         /* Need to check both planes against the pipe */
1267         for_each_pipe(i) {
1268                 reg = DSPCNTR(i);
1269                 val = I915_READ(reg);
1270                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1271                         DISPPLANE_SEL_PIPE_SHIFT;
1272                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1273                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1274                      plane_name(i), pipe_name(pipe));
1275         }
1276 }
1277 
1278 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1279                                     enum pipe pipe)
1280 {
1281         struct drm_device *dev = dev_priv->dev;
1282         int reg, sprite;
1283         u32 val;
1284 
1285         if (IS_VALLEYVIEW(dev)) {
1286                 for_each_sprite(pipe, sprite) {
1287                         reg = SPCNTR(pipe, sprite);
1288                         val = I915_READ(reg);
1289                         WARN(val & SP_ENABLE,
1290                              "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1291                              sprite_name(pipe, sprite), pipe_name(pipe));
1292                 }
1293         } else if (INTEL_INFO(dev)->gen >= 7) {
1294                 reg = SPRCTL(pipe);
1295                 val = I915_READ(reg);
1296                 WARN(val & SPRITE_ENABLE,
1297                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1298                      plane_name(pipe), pipe_name(pipe));
1299         } else if (INTEL_INFO(dev)->gen >= 5) {
1300                 reg = DVSCNTR(pipe);
1301                 val = I915_READ(reg);
1302                 WARN(val & DVS_ENABLE,
1303                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1304                      plane_name(pipe), pipe_name(pipe));
1305         }
1306 }
1307 
1308 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1309 {
1310         u32 val;
1311         bool enabled;
1312 
1313         WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1314 
1315         val = I915_READ(PCH_DREF_CONTROL);
1316         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1317                             DREF_SUPERSPREAD_SOURCE_MASK));
1318         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1319 }
1320 
1321 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1322                                            enum pipe pipe)
1323 {
1324         int reg;
1325         u32 val;
1326         bool enabled;
1327 
1328         reg = PCH_TRANSCONF(pipe);
1329         val = I915_READ(reg);
1330         enabled = !!(val & TRANS_ENABLE);
1331         WARN(enabled,
1332              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1333              pipe_name(pipe));
1334 }
1335 
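/*
 * Decode a DP port control value and report whether that port is currently
 * enabled and routed to @pipe; the pipe-select encoding differs between
 * CPT, CHV and older platforms.
 */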
1336 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1337                             enum pipe pipe, u32 port_sel, u32 val)
1338 {
1339         if ((val & DP_PORT_EN) == 0)
1340                 return false;
1341 
1342         if (HAS_PCH_CPT(dev_priv->dev)) {
1343                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1344                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1345                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1346                         return false;
1347         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1348                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1349                         return false;
1350         } else {
1351                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1352                         return false;
1353         }
1354         return true;
1355 }
1356 
1357 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1358                               enum pipe pipe, u32 val)
1359 {
1360         if ((val & SDVO_ENABLE) == 0)
1361                 return false;
1362 
1363         if (HAS_PCH_CPT(dev_priv->dev)) {
1364                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1365                         return false;
1366         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1367                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1368                         return false;
1369         } else {
1370                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1371                         return false;
1372         }
1373         return true;
1374 }
1375 
1376 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1377                               enum pipe pipe, u32 val)
1378 {
1379         if ((val & LVDS_PORT_EN) == 0)
1380                 return false;
1381 
1382         if (HAS_PCH_CPT(dev_priv->dev)) {
1383                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1384                         return false;
1385         } else {
1386                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1387                         return false;
1388         }
1389         return true;
1390 }
1391 
1392 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1393                               enum pipe pipe, u32 val)
1394 {
1395         if ((val & ADPA_DAC_ENABLE) == 0)
1396                 return false;
1397         if (HAS_PCH_CPT(dev_priv->dev)) {
1398                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1399                         return false;
1400         } else {
1401                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1402                         return false;
1403         }
1404         return true;
1405 }
1406 
1407 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1408                                    enum pipe pipe, int reg, u32 port_sel)
1409 {
1410         u32 val = I915_READ(reg);
1411         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1412              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1413              reg, pipe_name(pipe));
1414 
1415         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1416              && (val & DP_PIPEB_SELECT),
1417              "IBX PCH dp port still using transcoder B\n");
1418 }
1419 
1420 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1421                                      enum pipe pipe, int reg)
1422 {
1423         u32 val = I915_READ(reg);
1424         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1425              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1426              reg, pipe_name(pipe));
1427 
1428         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1429              && (val & SDVO_PIPE_B_SELECT),
1430              "IBX PCH hdmi port still using transcoder B\n");
1431 }
1432 
1433 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1434                                       enum pipe pipe)
1435 {
1436         int reg;
1437         u32 val;
1438 
1439         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1440         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1441         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1442 
1443         reg = PCH_ADPA;
1444         val = I915_READ(reg);
1445         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1446              "PCH VGA enabled on transcoder %c, should be disabled\n",
1447              pipe_name(pipe));
1448 
1449         reg = PCH_LVDS;
1450         val = I915_READ(reg);
1451         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1452              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1453              pipe_name(pipe));
1454 
1455         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1456         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1457         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1458 }
1459 
1460 static void intel_init_dpio(struct drm_device *dev)
1461 {
1462         struct drm_i915_private *dev_priv = dev->dev_private;
1463 
1464         if (!IS_VALLEYVIEW(dev))
1465                 return;
1466 
1467         /*
1468          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1469          * CHV x1 PHY (DP/HDMI D)
1470          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1471          */
1472         if (IS_CHERRYVIEW(dev)) {
1473                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1474                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1475         } else {
1476                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1477         }
1478 }
1479 
1480 static void intel_reset_dpio(struct drm_device *dev)
1481 {
1482         struct drm_i915_private *dev_priv = dev->dev_private;
1483 
1484         if (!IS_VALLEYVIEW(dev))
1485                 return;
1486 
1487         if (IS_CHERRYVIEW(dev)) {
1488                 enum dpio_phy phy;
1489                 u32 val;
1490 
1491                 for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
1492                         /* Poll for phypwrgood signal */
1493                         if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
1494                                                 PHY_POWERGOOD(phy), 1))
1495                                 DRM_ERROR("Display PHY %d did not power up\n", phy);
1496 
1497                         /*
1498                          * Deassert common lane reset for PHY.
1499                          *
1500                          * This should only be done on init and resume from S3
1501                          * with both PLLs disabled, or we risk losing DPIO and
1502                          * PLL synchronization.
1503                          */
1504                         val = I915_READ(DISPLAY_PHY_CONTROL);
1505                         I915_WRITE(DISPLAY_PHY_CONTROL,
1506                                 PHY_COM_LANE_RESET_DEASSERT(phy, val));
1507                 }
1508 
1509         } else {
1510                 /*
1511                  * If DPIO has already been reset, e.g. by BIOS, just skip all
1512                  * this.
1513                  */
1514                 if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
1515                         return;
1516 
1517                 /*
1518                  * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1519                  * Need to assert and de-assert PHY SB reset by gating the
1520                  * common lane power, then un-gating it.
1521                  * Simply ungating isn't enough to reset the PHY enough to get
1522                  * ports and lanes running.
1523                  */
1524                 __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
1525                                      false);
1526                 __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
1527                                      true);
1528         }
1529 }
1530 
1531 static void vlv_enable_pll(struct intel_crtc *crtc)
1532 {
1533         struct drm_device *dev = crtc->base.dev;
1534         struct drm_i915_private *dev_priv = dev->dev_private;
1535         int reg = DPLL(crtc->pipe);
1536         u32 dpll = crtc->config.dpll_hw_state.dpll;
1537 
1538         assert_pipe_disabled(dev_priv, crtc->pipe);
1539 
1540         /* No really, not for ILK+ */
1541         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1542 
1543         /* PLL is protected by panel, make sure we can write it */
1544         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1545                 assert_panel_unlocked(dev_priv, crtc->pipe);
1546 
1547         I915_WRITE(reg, dpll);
1548         POSTING_READ(reg);
1549         udelay(150);
1550 
1551         if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1552                 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1553 
1554         I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1555         POSTING_READ(DPLL_MD(crtc->pipe));
1556 
1557         /* We do this three times for luck */
1558         I915_WRITE(reg, dpll);
1559         POSTING_READ(reg);
1560         udelay(150); /* wait for warmup */
1561         I915_WRITE(reg, dpll);
1562         POSTING_READ(reg);
1563         udelay(150); /* wait for warmup */
1564         I915_WRITE(reg, dpll);
1565         POSTING_READ(reg);
1566         udelay(150); /* wait for warmup */
1567 }
1568 
1569 static void chv_enable_pll(struct intel_crtc *crtc)
1570 {
1571         struct drm_device *dev = crtc->base.dev;
1572         struct drm_i915_private *dev_priv = dev->dev_private;
1573         int pipe = crtc->pipe;
1574         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1575         u32 tmp;
1576 
1577         assert_pipe_disabled(dev_priv, crtc->pipe);
1578 
1579         BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1580 
1581         mutex_lock(&dev_priv->dpio_lock);
1582 
1583         /* Re-enable the 10bit clock to the display controller */
1584         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1585         tmp |= DPIO_DCLKP_EN;
1586         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1587 
1588         /*
1589          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1590          */
1591         udelay(1);
1592 
1593         /* Enable PLL */
1594         I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1595 
1596         /* Check PLL is locked */
1597         if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1598                 DRM_ERROR("PLL %d failed to lock\n", pipe);
1599 
1600         /* not sure when this should be written */
1601         I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1602         POSTING_READ(DPLL_MD(pipe));
1603 
1604         mutex_unlock(&dev_priv->dpio_lock);
1605 }
1606 
1607 static void i9xx_enable_pll(struct intel_crtc *crtc)
1608 {
1609         struct drm_device *dev = crtc->base.dev;
1610         struct drm_i915_private *dev_priv = dev->dev_private;
1611         int reg = DPLL(crtc->pipe);
1612         u32 dpll = crtc->config.dpll_hw_state.dpll;
1613 
1614         assert_pipe_disabled(dev_priv, crtc->pipe);
1615 
1616         /* No really, not for ILK+ */
1617         BUG_ON(INTEL_INFO(dev)->gen >= 5);
1618 
1619         /* PLL is protected by panel, make sure we can write it */
1620         if (IS_MOBILE(dev) && !IS_I830(dev))
1621                 assert_panel_unlocked(dev_priv, crtc->pipe);
1622 
1623         I915_WRITE(reg, dpll);
1624 
1625         /* Wait for the clocks to stabilize. */
1626         POSTING_READ(reg);
1627         udelay(150);
1628 
1629         if (INTEL_INFO(dev)->gen >= 4) {
1630                 I915_WRITE(DPLL_MD(crtc->pipe),
1631                            crtc->config.dpll_hw_state.dpll_md);
1632         } else {
1633                 /* The pixel multiplier can only be updated once the
1634                  * DPLL is enabled and the clocks are stable.
1635                  *
1636                  * So write it again.
1637                  */
1638                 I915_WRITE(reg, dpll);
1639         }
1640 
1641         /* We do this three times for luck */
1642         I915_WRITE(reg, dpll);
1643         POSTING_READ(reg);
1644         udelay(150); /* wait for warmup */
1645         I915_WRITE(reg, dpll);
1646         POSTING_READ(reg);
1647         udelay(150); /* wait for warmup */
1648         I915_WRITE(reg, dpll);
1649         POSTING_READ(reg);
1650         udelay(150); /* wait for warmup */
1651 }
1652 
1653 /**
1654  * i9xx_disable_pll - disable a PLL
1655  * @dev_priv: i915 private structure
1656  * @pipe: pipe PLL to disable
1657  *
1658  * Disable the PLL for @pipe, making sure the pipe is off first.
1659  *
1660  * Note!  This is for pre-ILK only.
1661  */
1662 static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1663 {
1664         /* Don't disable pipe A or its PLL if the QUIRK_PIPEA_FORCE quirk needs them on */
1665         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1666                 return;
1667 
1668         /* Make sure the pipe isn't still relying on us */
1669         assert_pipe_disabled(dev_priv, pipe);
1670 
1671         I915_WRITE(DPLL(pipe), 0);
1672         POSTING_READ(DPLL(pipe));
1673 }
1674 
1675 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1676 {
1677         u32 val = 0;
1678 
1679         /* Make sure the pipe isn't still relying on us */
1680         assert_pipe_disabled(dev_priv, pipe);
1681 
1682         /*
1683          * Leave integrated clock source and reference clock enabled for pipe B.
1684          * The latter is needed for VGA hotplug / manual detection.
1685          */
1686         if (pipe == PIPE_B)
1687                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1688         I915_WRITE(DPLL(pipe), val);
1689         POSTING_READ(DPLL(pipe));
1690 
1691 }
1692 
1693 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1694 {
1695         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1696         u32 val;
1697 
1698         /* Make sure the pipe isn't still relying on us */
1699         assert_pipe_disabled(dev_priv, pipe);
1700 
1701         /* Set PLL en = 0 */
1702         val = DPLL_SSC_REF_CLOCK_CHV;
1703         if (pipe != PIPE_A)
1704                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1705         I915_WRITE(DPLL(pipe), val);
1706         POSTING_READ(DPLL(pipe));
1707 
1708         mutex_lock(&dev_priv->dpio_lock);
1709 
1710         /* Disable 10bit clock to display controller */
1711         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1712         val &= ~DPIO_DCLKP_EN;
1713         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1714 
1715         mutex_unlock(&dev_priv->dpio_lock);
1716 }
1717 
1718 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1719                 struct intel_digital_port *dport)
1720 {
1721         u32 port_mask;
1722         int dpll_reg;
1723 
1724         switch (dport->port) {
1725         case PORT_B:
1726                 port_mask = DPLL_PORTB_READY_MASK;
1727                 dpll_reg = DPLL(0);
1728                 break;
1729         case PORT_C:
1730                 port_mask = DPLL_PORTC_READY_MASK;
1731                 dpll_reg = DPLL(0);
1732                 break;
1733         case PORT_D:
1734                 port_mask = DPLL_PORTD_READY_MASK;
1735                 dpll_reg = DPIO_PHY_STATUS;
1736                 break;
1737         default:
1738                 BUG();
1739         }
1740 
1741         if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1742                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1743                      port_name(dport->port), I915_READ(dpll_reg));
1744 }
1745 
1746 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1747 {
1748         struct drm_device *dev = crtc->base.dev;
1749         struct drm_i915_private *dev_priv = dev->dev_private;
1750         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1751 
1752         WARN_ON(!pll->refcount);
1753         if (pll->active == 0) {
1754                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1755                 WARN_ON(pll->on);
1756                 assert_shared_dpll_disabled(dev_priv, pll);
1757 
1758                 pll->mode_set(dev_priv, pll);
1759         }
1760 }
1761 
1762 /**
1763  * intel_enable_shared_dpll - enable PCH PLL
1764  * @dev_priv: i915 private structure
1765  * @pipe: pipe PLL to enable
1766  *
1767  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1768  * drives the transcoder clock.
1769  */
1770 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1771 {
1772         struct drm_device *dev = crtc->base.dev;
1773         struct drm_i915_private *dev_priv = dev->dev_private;
1774         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1775 
1776         if (WARN_ON(pll == NULL))
1777                 return;
1778 
1779         if (WARN_ON(pll->refcount == 0))
1780                 return;
1781 
1782         DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1783                       pll->name, pll->active, pll->on,
1784                       crtc->base.base.id);
1785 
1786         if (pll->active++) {
1787                 WARN_ON(!pll->on);
1788                 assert_shared_dpll_enabled(dev_priv, pll);
1789                 return;
1790         }
1791         WARN_ON(pll->on);
1792 
1793         DRM_DEBUG_KMS("enabling %s\n", pll->name);
1794         pll->enable(dev_priv, pll);
1795         pll->on = true;
1796 }
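
/*
 * Ordering note (inferred from the asserts in this file, not new behaviour):
 * the shared DPLL enabled here must be running before the PCH transcoder is
 * turned on, because ironlake_enable_pch_transcoder() below begins with
 * assert_shared_dpll_enabled() and the transcoder is clocked by that PLL.
 */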
1797 
1798 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1799 {
1800         struct drm_device *dev = crtc->base.dev;
1801         struct drm_i915_private *dev_priv = dev->dev_private;
1802         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1803 
1804         /* PCH only available on ILK+ */
1805         BUG_ON(INTEL_INFO(dev)->gen < 5);
1806         if (WARN_ON(pll == NULL))
1807                 return;
1808 
1809         if (WARN_ON(pll->refcount == 0))
1810                 return;
1811 
1812         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1813                       pll->name, pll->active, pll->on,
1814                       crtc->base.base.id);
1815 
1816         if (WARN_ON(pll->active == 0)) {
1817                 assert_shared_dpll_disabled(dev_priv, pll);
1818                 return;
1819         }
1820 
1821         assert_shared_dpll_enabled(dev_priv, pll);
1822         WARN_ON(!pll->on);
1823         if (--pll->active)
1824                 return;
1825 
1826         DRM_DEBUG_KMS("disabling %s\n", pll->name);
1827         pll->disable(dev_priv, pll);
1828         pll->on = false;
1829 }
1830 
1831 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1832                                            enum pipe pipe)
1833 {
1834         struct drm_device *dev = dev_priv->dev;
1835         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1836         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1837         uint32_t reg, val, pipeconf_val;
1838 
1839         /* PCH only available on ILK+ */
1840         BUG_ON(INTEL_INFO(dev)->gen < 5);
1841 
1842         /* Make sure PCH DPLL is enabled */
1843         assert_shared_dpll_enabled(dev_priv,
1844                                    intel_crtc_to_shared_dpll(intel_crtc));
1845 
1846         /* FDI must be feeding us bits for PCH ports */
1847         assert_fdi_tx_enabled(dev_priv, pipe);
1848         assert_fdi_rx_enabled(dev_priv, pipe);
1849 
1850         if (HAS_PCH_CPT(dev)) {
1851                 /* Workaround: Set the timing override bit before enabling the
1852                  * pch transcoder. */
1853                 reg = TRANS_CHICKEN2(pipe);
1854                 val = I915_READ(reg);
1855                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1856                 I915_WRITE(reg, val);
1857         }
1858 
1859         reg = PCH_TRANSCONF(pipe);
1860         val = I915_READ(reg);
1861         pipeconf_val = I915_READ(PIPECONF(pipe));
1862 
1863         if (HAS_PCH_IBX(dev_priv->dev)) {
1864                 /*
1865                  * Make the BPC in the transcoder consistent with
1866                  * the BPC in the pipeconf reg.
1867                  */
1868                 val &= ~PIPECONF_BPC_MASK;
1869                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1870         }
1871 
1872         val &= ~TRANS_INTERLACE_MASK;
1873         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1874                 if (HAS_PCH_IBX(dev_priv->dev) &&
1875                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1876                         val |= TRANS_LEGACY_INTERLACED_ILK;
1877                 else
1878                         val |= TRANS_INTERLACED;
1879         else
1880                 val |= TRANS_PROGRESSIVE;
1881 
1882         I915_WRITE(reg, val | TRANS_ENABLE);
1883         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1884                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1885 }
1886 
1887 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1888                                       enum transcoder cpu_transcoder)
1889 {
1890         u32 val, pipeconf_val;
1891 
1892         /* PCH only available on ILK+ */
1893         BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
1894 
1895         /* FDI must be feeding us bits for PCH ports */
1896         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1897         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1898 
1899         /* Workaround: set timing override bit. */
1900         val = I915_READ(_TRANSA_CHICKEN2);
1901         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1902         I915_WRITE(_TRANSA_CHICKEN2, val);
1903 
1904         val = TRANS_ENABLE;
1905         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1906 
1907         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1908             PIPECONF_INTERLACED_ILK)
1909                 val |= TRANS_INTERLACED;
1910         else
1911                 val |= TRANS_PROGRESSIVE;
1912 
1913         I915_WRITE(LPT_TRANSCONF, val);
1914         if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1915                 DRM_ERROR("Failed to enable PCH transcoder\n");
1916 }
1917 
1918 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1919                                             enum pipe pipe)
1920 {
1921         struct drm_device *dev = dev_priv->dev;
1922         uint32_t reg, val;
1923 
1924         /* FDI relies on the transcoder */
1925         assert_fdi_tx_disabled(dev_priv, pipe);
1926         assert_fdi_rx_disabled(dev_priv, pipe);
1927 
1928         /* Ports must be off as well */
1929         assert_pch_ports_disabled(dev_priv, pipe);
1930 
1931         reg = PCH_TRANSCONF(pipe);
1932         val = I915_READ(reg);
1933         val &= ~TRANS_ENABLE;
1934         I915_WRITE(reg, val);
1935         /* wait for PCH transcoder off, transcoder state */
1936         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1937                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1938 
1939         if (!HAS_PCH_IBX(dev)) {
1940                 /* Workaround: Clear the timing override chicken bit again. */
1941                 reg = TRANS_CHICKEN2(pipe);
1942                 val = I915_READ(reg);
1943                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1944                 I915_WRITE(reg, val);
1945         }
1946 }
1947 
1948 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1949 {
1950         u32 val;
1951 
1952         val = I915_READ(LPT_TRANSCONF);
1953         val &= ~TRANS_ENABLE;
1954         I915_WRITE(LPT_TRANSCONF, val);
1955         /* wait for PCH transcoder off, transcoder state */
1956         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1957                 DRM_ERROR("Failed to disable PCH transcoder\n");
1958 
1959         /* Workaround: clear timing override bit. */
1960         val = I915_READ(_TRANSA_CHICKEN2);
1961         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1962         I915_WRITE(_TRANSA_CHICKEN2, val);
1963 }
1964 
1965 /**
1966  * intel_enable_pipe - enable a pipe, asserting requirements
1967  * @crtc: crtc responsible for the pipe
1968  *
1969  * Enable @crtc's pipe, making sure that various hardware specific requirements
1970  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1971  */
1972 static void intel_enable_pipe(struct intel_crtc *crtc)
1973 {
1974         struct drm_device *dev = crtc->base.dev;
1975         struct drm_i915_private *dev_priv = dev->dev_private;
1976         enum pipe pipe = crtc->pipe;
1977         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1978                                                                       pipe);
1979         enum pipe pch_transcoder;
1980         int reg;
1981         u32 val;
1982 
1983         assert_planes_disabled(dev_priv, pipe);
1984         assert_cursor_disabled(dev_priv, pipe);
1985         assert_sprites_disabled(dev_priv, pipe);
1986 
1987         if (HAS_PCH_LPT(dev_priv->dev))
1988                 pch_transcoder = TRANSCODER_A;
1989         else
1990                 pch_transcoder = pipe;
1991 
1992         /*
1993          * A pipe without a PLL won't actually be able to drive bits from
1994          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1995          * need the check.
1996          */
1997         if (!HAS_PCH_SPLIT(dev_priv->dev)) {
1998                 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
1999                         assert_dsi_pll_enabled(dev_priv);
2000                 else
2001                         assert_pll_enabled(dev_priv, pipe);
2002         } else {
2003                 if (crtc->config.has_pch_encoder) {
2004                         /* if driving the PCH, we need FDI enabled */
2005                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2006                         assert_fdi_tx_pll_enabled(dev_priv,
2007                                                   (enum pipe) cpu_transcoder);
2008                 }
2009                 /* FIXME: assert CPU port conditions for SNB+ */
2010         }
2011 
2012         reg = PIPECONF(cpu_transcoder);
2013         val = I915_READ(reg);
2014         if (val & PIPECONF_ENABLE) {
2015                 WARN_ON(!(pipe == PIPE_A &&
2016                           dev_priv->quirks & QUIRK_PIPEA_FORCE));
2017                 return;
2018         }
2019 
2020         I915_WRITE(reg, val | PIPECONF_ENABLE);
2021         POSTING_READ(reg);
2022 }
2023 
2024 /**
2025  * intel_disable_pipe - disable a pipe, asserting requirements
2026  * @dev_priv: i915 private structure
2027  * @pipe: pipe to disable
2028  *
2029  * Disable @pipe, making sure that various hardware specific requirements
2030  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
2031  *
2032  * @pipe should be %PIPE_A or %PIPE_B.
2033  *
2034  * Will wait until the pipe has shut down before returning.
2035  */
2036 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
2037                                enum pipe pipe)
2038 {
2039         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2040                                                                       pipe);
2041         int reg;
2042         u32 val;
2043 
2044         /*
2045          * Make sure planes won't keep trying to pump pixels to us,
2046          * or we might hang the display.
2047          */
2048         assert_planes_disabled(dev_priv, pipe);
2049         assert_cursor_disabled(dev_priv, pipe);
2050         assert_sprites_disabled(dev_priv, pipe);
2051 
2052         /* Don't disable pipe A or its PLL if the QUIRK_PIPEA_FORCE quirk needs them on */
2053         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2054                 return;
2055 
2056         reg = PIPECONF(cpu_transcoder);
2057         val = I915_READ(reg);
2058         if ((val & PIPECONF_ENABLE) == 0)
2059                 return;
2060 
2061         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
2062         intel_wait_for_pipe_off(dev_priv->dev, pipe);
2063 }
2064 
2065 /*
2066  * Plane regs are double buffered, going from enabled->disabled needs a
2067  * trigger in order to latch.  The display address reg provides this.
2068  */
2069 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2070                                enum plane plane)
2071 {
2072         struct drm_device *dev = dev_priv->dev;
2073         u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2074 
2075         I915_WRITE(reg, I915_READ(reg));
2076         POSTING_READ(reg);
2077 }
2078 
2079 /**
2080  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2081  * @dev_priv: i915 private structure
2082  * @plane: plane to enable
2083  * @pipe: pipe being fed
2084  *
2085  * Enable @plane on @pipe, making sure that @pipe is running first.
2086  */
2087 static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
2088                                           enum plane plane, enum pipe pipe)
2089 {
2090         struct drm_device *dev = dev_priv->dev;
2091         struct intel_crtc *intel_crtc =
2092                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2093         int reg;
2094         u32 val;
2095 
2096         /* If the pipe isn't enabled, we can't pump pixels and may hang */
2097         assert_pipe_enabled(dev_priv, pipe);
2098 
2099         if (intel_crtc->primary_enabled)
2100                 return;
2101 
2102         intel_crtc->primary_enabled = true;
2103 
2104         reg = DSPCNTR(plane);
2105         val = I915_READ(reg);
2106         WARN_ON(val & DISPLAY_PLANE_ENABLE);
2107 
2108         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
2109         intel_flush_primary_plane(dev_priv, plane);
2110 
2111         /*
2112          * BDW signals flip done immediately if the plane
2113          * is disabled, even if the plane enable is already
2114          * armed to occur at the next vblank :(
2115          */
2116         if (IS_BROADWELL(dev))
2117                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2118 }
2119 
2120 /**
2121  * intel_disable_primary_hw_plane - disable the primary hardware plane
2122  * @dev_priv: i915 private structure
2123  * @plane: plane to disable
2124  * @pipe: pipe consuming the data
2125  *
2126  * Disable @plane; should be an independent operation.
2127  */
2128 static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
2129                                            enum plane plane, enum pipe pipe)
2130 {
2131         struct intel_crtc *intel_crtc =
2132                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2133         int reg;
2134         u32 val;
2135 
2136         if (!intel_crtc->primary_enabled)
2137                 return;
2138 
2139         intel_crtc->primary_enabled = false;
2140 
2141         reg = DSPCNTR(plane);
2142         val = I915_READ(reg);
2143         WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
2144 
2145         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
2146         intel_flush_primary_plane(dev_priv, plane);
2147 }
2148 
2149 static bool need_vtd_wa(struct drm_device *dev)
2150 {
2151 #ifdef CONFIG_INTEL_IOMMU
2152         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2153                 return true;
2154 #endif
2155         return false;
2156 }
2157 
2158 static int intel_align_height(struct drm_device *dev, int height, bool tiled)
2159 {
2160         int tile_height;
2161 
2162         tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
2163         return ALIGN(height, tile_height);
2164 }
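
/*
 * Worked example (illustrative numbers, using the tile heights above): an
 * X-tiled framebuffer of height 1080 on a gen3+ part uses tile_height 8, so
 * intel_align_height(dev, 1080, true) returns 1080; on gen2 the tile height
 * is 16 and the same call returns ALIGN(1080, 16) == 1088.  An untiled
 * framebuffer (tile_height 1) is returned unchanged.
 */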
2165 
2166 int
2167 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2168                            struct drm_i915_gem_object *obj,
2169                            struct intel_engine_cs *pipelined)
2170 {
2171         struct drm_i915_private *dev_priv = dev->dev_private;
2172         u32 alignment;
2173         int ret;
2174 
2175         switch (obj->tiling_mode) {
2176         case I915_TILING_NONE:
2177                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2178                         alignment = 128 * 1024;
2179                 else if (INTEL_INFO(dev)->gen >= 4)
2180                         alignment = 4 * 1024;
2181                 else
2182                         alignment = 64 * 1024;
2183                 break;
2184         case I915_TILING_X:
2185                 /* pin() will align the object as required by fence */
2186                 alignment = 0;
2187                 break;
2188         case I915_TILING_Y:
2189                 WARN(1, "Y tiled bo slipped through, driver bug!\n");
2190                 return -EINVAL;
2191         default:
2192                 BUG();
2193         }
2194 
2195         /* Note that the w/a also requires 64 PTE of padding following the
2196          * bo. We currently fill all unused PTE with the shadow page and so
2197          * we should always have valid PTE following the scanout preventing
2198          * the VT-d warning.
2199          */
2200         if (need_vtd_wa(dev) && alignment < 256 * 1024)
2201                 alignment = 256 * 1024;
2202 
2203         dev_priv->mm.interruptible = false;
2204         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2205         if (ret)
2206                 goto err_interruptible;
2207 
2208         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2209          * fence, whereas 965+ only requires a fence if using
2210          * framebuffer compression.  For simplicity, we always install
2211          * a fence as the cost is not that onerous.
2212          */
2213         ret = i915_gem_object_get_fence(obj);
2214         if (ret)
2215                 goto err_unpin;
2216 
2217         i915_gem_object_pin_fence(obj);
2218 
2219         dev_priv->mm.interruptible = true;
2220         return 0;
2221 
2222 err_unpin:
2223         i915_gem_object_unpin_from_display_plane(obj);
2224 err_interruptible:
2225         dev_priv->mm.interruptible = true;
2226         return ret;
2227 }
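
/*
 * Summary of the alignment rules above (illustrative, not exhaustive): a
 * linear (I915_TILING_NONE) scanout buffer is pinned with 128 KiB alignment
 * on Broadwater/Crestline, 4 KiB on other gen4+ parts and 64 KiB before
 * gen4; an X-tiled buffer leaves alignment 0 so pin() can satisfy the fence
 * requirements; Y-tiled scanout is rejected.  When the VT-d workaround
 * applies, anything below 256 KiB is bumped up to 256 KiB.
 */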
2228 
2229 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2230 {
2231         i915_gem_object_unpin_fence(obj);
2232         i915_gem_object_unpin_from_display_plane(obj);
2233 }
2234 
2235 /* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
2236  * is assumed to be a power of two. */
2237 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2238                                              unsigned int tiling_mode,
2239                                              unsigned int cpp,
2240                                              unsigned int pitch)
2241 {
2242         if (tiling_mode != I915_TILING_NONE) {
2243                 unsigned int tile_rows, tiles;
2244 
2245                 tile_rows = *y / 8;
2246                 *y %= 8;
2247 
2248                 tiles = *x / (512/cpp);
2249                 *x %= 512/cpp;
2250 
2251                 return tile_rows * pitch * 8 + tiles * 4096;
2252         } else {
2253                 unsigned int offset;
2254 
2255                 offset = *y * pitch + *x * cpp;
2256                 *y = 0;
2257                 *x = (offset & 4095) / cpp;
2258                 return offset & -4096;
2259         }
2260 }
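
/*
 * Worked example (illustrative numbers): with cpp == 4 and pitch == 8192, an
 * X-tiled surface at x == 260, y == 50 gives tile_rows == 6 (y becomes 2)
 * and tiles == 260 / 128 == 2 (x becomes 4), so the function returns
 * 6 * 8192 * 8 + 2 * 4096 == 401408.  For a linear surface the same
 * coordinates give offset == 50 * 8192 + 260 * 4 == 410640; the page-aligned
 * 409600 is returned, y becomes 0 and x becomes (410640 & 4095) / 4 == 260.
 */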
2261 
2262 int intel_format_to_fourcc(int format)
2263 {
2264         switch (format) {
2265         case DISPPLANE_8BPP:
2266                 return DRM_FORMAT_C8;
2267         case DISPPLANE_BGRX555:
2268                 return DRM_FORMAT_XRGB1555;
2269         case DISPPLANE_BGRX565:
2270                 return DRM_FORMAT_RGB565;
2271         default:
2272         case DISPPLANE_BGRX888:
2273                 return DRM_FORMAT_XRGB8888;
2274         case DISPPLANE_RGBX888:
2275                 return DRM_FORMAT_XBGR8888;
2276         case DISPPLANE_BGRX101010:
2277                 return DRM_FORMAT_XRGB2101010;
2278         case DISPPLANE_RGBX101010:
2279                 return DRM_FORMAT_XBGR2101010;
2280         }
2281 }
2282 
2283 static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
2284                                   struct intel_plane_config *plane_config)
2285 {
2286         struct drm_device *dev = crtc->base.dev;
2287         struct drm_i915_gem_object *obj = NULL;
2288         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2289         u32 base = plane_config->base;
2290 
2291         if (plane_config->size == 0)
2292                 return false;
2293 
2294         obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2295                                                              plane_config->size);
2296         if (!obj)
2297                 return false;
2298 
2299         if (plane_config->tiled) {
2300                 obj->tiling_mode = I915_TILING_X;
2301                 obj->stride = crtc->base.primary->fb->pitches[0];
2302         }
2303 
2304         mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2305         mode_cmd.width = crtc->base.primary->fb->width;
2306         mode_cmd.height = crtc->base.primary->fb->height;
2307         mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2308 
2309         mutex_lock(&dev->struct_mutex);
2310 
2311         if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2312                                    &mode_cmd, obj)) {
2313                 DRM_DEBUG_KMS("intel fb init failed\n");
2314                 goto out_unref_obj;
2315         }
2316 
2317         mutex_unlock(&dev->struct_mutex);
2318 
2319         DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2320         return true;
2321 
2322 out_unref_obj:
2323         drm_gem_object_unreference(&obj->base);
2324         mutex_unlock(&dev->struct_mutex);
2325         return false;
2326 }
2327 
2328 static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2329                                  struct intel_plane_config *plane_config)
2330 {
2331         struct drm_device *dev = intel_crtc->base.dev;
2332         struct drm_crtc *c;
2333         struct intel_crtc *i;
2334         struct intel_framebuffer *fb;
2335 
2336         if (!intel_crtc->base.primary->fb)
2337                 return;
2338 
2339         if (intel_alloc_plane_obj(intel_crtc, plane_config))
2340                 return;
2341 
2342         kfree(intel_crtc->base.primary->fb);
2343         intel_crtc->base.primary->fb = NULL;
2344 
2345         /*
2346          * Failed to alloc the obj, check to see if we should share
2347          * an fb with another CRTC instead
2348          */
2349         for_each_crtc(dev, c) {
2350                 i = to_intel_crtc(c);
2351 
2352                 if (c == &intel_crtc->base)
2353                         continue;
2354 
2355                 if (!i->active || !c->primary->fb)
2356                         continue;
2357 
2358                 fb = to_intel_framebuffer(c->primary->fb);
2359                 if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
2360                         drm_framebuffer_reference(c->primary->fb);
2361                         intel_crtc->base.primary->fb = c->primary->fb;
2362                         break;
2363                 }
2364         }
2365 }
2366 
2367 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2368                                       struct drm_framebuffer *fb,
2369                                       int x, int y)
2370 {
2371         struct drm_device *dev = crtc->dev;
2372         struct drm_i915_private *dev_priv = dev->dev_private;
2373         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2374         struct intel_framebuffer *intel_fb;
2375         struct drm_i915_gem_object *obj;
2376         int plane = intel_crtc->plane;
2377         unsigned long linear_offset;
2378         u32 dspcntr;
2379         u32 reg;
2380 
2381         intel_fb = to_intel_framebuffer(fb);
2382         obj = intel_fb->obj;
2383 
2384         reg = DSPCNTR(plane);
2385         dspcntr = I915_READ(reg);
2386         /* Mask out pixel format bits in case we change it */
2387         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2388         switch (fb->pixel_format) {
2389         case DRM_FORMAT_C8:
2390                 dspcntr |= DISPPLANE_8BPP;
2391                 break;
2392         case DRM_FORMAT_XRGB1555:
2393         case DRM_FORMAT_ARGB1555:
2394                 dspcntr |= DISPPLANE_BGRX555;
2395                 break;
2396         case DRM_FORMAT_RGB565:
2397                 dspcntr |= DISPPLANE_BGRX565;
2398                 break;
2399         case DRM_FORMAT_XRGB8888:
2400         case DRM_FORMAT_ARGB8888:
2401                 dspcntr |= DISPPLANE_BGRX888;
2402                 break;
2403         case DRM_FORMAT_XBGR8888:
2404         case DRM_FORMAT_ABGR8888:
2405                 dspcntr |= DISPPLANE_RGBX888;
2406                 break;
2407         case DRM_FORMAT_XRGB2101010:
2408         case DRM_FORMAT_ARGB2101010:
2409                 dspcntr |= DISPPLANE_BGRX101010;
2410                 break;
2411         case DRM_FORMAT_XBGR2101010:
2412         case DRM_FORMAT_ABGR2101010:
2413                 dspcntr |= DISPPLANE_RGBX101010;
2414                 break;
2415         default:
2416                 BUG();
2417         }
2418 
2419         if (INTEL_INFO(dev)->gen >= 4) {
2420                 if (obj->tiling_mode != I915_TILING_NONE)
2421                         dspcntr |= DISPPLANE_TILED;
2422                 else
2423                         dspcntr &= ~DISPPLANE_TILED;
2424         }
2425 
2426         if (IS_G4X(dev))
2427                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2428 
2429         I915_WRITE(reg, dspcntr);
2430 
2431         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2432 
2433         if (INTEL_INFO(dev)->gen >= 4) {
2434                 intel_crtc->dspaddr_offset =
2435                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2436                                                        fb->bits_per_pixel / 8,
2437                                                        fb->pitches[0]);
2438                 linear_offset -= intel_crtc->dspaddr_offset;
2439         } else {
2440                 intel_crtc->dspaddr_offset = linear_offset;
2441         }
2442 
2443         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2444                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2445                       fb->pitches[0]);
2446         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2447         if (INTEL_INFO(dev)->gen >= 4) {
2448                 I915_WRITE(DSPSURF(plane),
2449                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2450                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2451                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2452         } else
2453                 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2454         POSTING_READ(reg);
2455 }
2456 
2457 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2458                                           struct drm_framebuffer *fb,
2459                                           int x, int y)
2460 {
2461         struct drm_device *dev = crtc->dev;
2462         struct drm_i915_private *dev_priv = dev->dev_private;
2463         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2464         struct intel_framebuffer *intel_fb;
2465         struct drm_i915_gem_object *obj;
2466         int plane = intel_crtc->plane;
2467         unsigned long linear_offset;
2468         u32 dspcntr;
2469         u32 reg;
2470 
2471         intel_fb = to_intel_framebuffer(fb);
2472         obj = intel_fb->obj;
2473 
2474         reg = DSPCNTR(plane);
2475         dspcntr = I915_READ(reg);
2476         /* Mask out pixel format bits in case we change it */
2477         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2478         switch (fb->pixel_format) {
2479         case DRM_FORMAT_C8:
2480                 dspcntr |= DISPPLANE_8BPP;
2481                 break;
2482         case DRM_FORMAT_RGB565:
2483                 dspcntr |= DISPPLANE_BGRX565;
2484                 break;
2485         case DRM_FORMAT_XRGB8888:
2486         case DRM_FORMAT_ARGB8888:
2487                 dspcntr |= DISPPLANE_BGRX888;
2488                 break;
2489         case DRM_FORMAT_XBGR8888:
2490         case DRM_FORMAT_ABGR8888:
2491                 dspcntr |= DISPPLANE_RGBX888;
2492                 break;
2493         case DRM_FORMAT_XRGB2101010:
2494         case DRM_FORMAT_ARGB2101010:
2495                 dspcntr |= DISPPLANE_BGRX101010;
2496                 break;
2497         case DRM_FORMAT_XBGR2101010:
2498         case DRM_FORMAT_ABGR2101010:
2499                 dspcntr |= DISPPLANE_RGBX101010;
2500                 break;
2501         default:
2502                 BUG();
2503         }
2504 
2505         if (obj->tiling_mode != I915_TILING_NONE)
2506                 dspcntr |= DISPPLANE_TILED;
2507         else
2508                 dspcntr &= ~DISPPLANE_TILED;
2509 
2510         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2511                 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2512         else
2513                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2514 
2515         I915_WRITE(reg, dspcntr);
2516 
2517         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2518         intel_crtc->dspaddr_offset =
2519                 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2520                                                fb->bits_per_pixel / 8,
2521                                                fb->pitches[0]);
2522         linear_offset -= intel_crtc->dspaddr_offset;
2523 
2524         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2525                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2526                       fb->pitches[0]);
2527         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2528         I915_WRITE(DSPSURF(plane),
2529                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2530         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2531                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2532         } else {
2533                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2534                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2535         }
2536         POSTING_READ(reg);
2537 }
2538 
2539 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2540 static int
2541 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2542                            int x, int y, enum mode_set_atomic state)
2543 {
2544         struct drm_device *dev = crtc->dev;
2545         struct drm_i915_private *dev_priv = dev->dev_private;
2546 
2547         if (dev_priv->display.disable_fbc)
2548                 dev_priv->display.disable_fbc(dev);
2549         intel_increase_pllclock(crtc);
2550 
2551         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2552 
2553         return 0;
2554 }
2555 
2556 void intel_display_handle_reset(struct drm_device *dev)
2557 {
2558         struct drm_i915_private *dev_priv = dev->dev_private;
2559         struct drm_crtc *crtc;
2560 
2561         /*
2562          * Flips in the rings have been nuked by the reset,
2563          * so complete all pending flips so that user space
2564          * will get its events and not get stuck.
2565          *
2566          * Also update the base address of all primary
2567          * planes to the last fb to make sure we're
2568          * showing the correct fb after a reset.
2569          *
2570          * Need to make two loops over the crtcs so that we
2571          * don't try to grab a crtc mutex before the
2572          * pending_flip_queue really got woken up.
2573          */
2574 
2575         for_each_crtc(dev, crtc) {
2576                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2577                 enum plane plane = intel_crtc->plane;
2578 
2579                 intel_prepare_page_flip(dev, plane);
2580                 intel_finish_page_flip_plane(dev, plane);
2581         }
2582 
2583         for_each_crtc(dev, crtc) {
2584                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2585 
2586                 drm_modeset_lock(&crtc->mutex, NULL);
2587                 /*
2588                  * FIXME: Once we have proper support for primary planes (and
2589                  * disabling them without disabling the entire crtc) allow again
2590                  * a NULL crtc->primary->fb.
2591                  */
2592                 if (intel_crtc->active && crtc->primary->fb)
2593                         dev_priv->display.update_primary_plane(crtc,
2594                                                                crtc->primary->fb,
2595                                                                crtc->x,
2596                                                                crtc->y);
2597                 drm_modeset_unlock(&crtc->mutex);
2598         }
2599 }
2600 
2601 static int
2602 intel_finish_fb(struct drm_framebuffer *old_fb)
2603 {
2604         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2605         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2606         bool was_interruptible = dev_priv->mm.interruptible;
2607         int ret;
2608 
2609         /* Big Hammer, we also need to ensure that any pending
2610          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2611          * current scanout is retired before unpinning the old
2612          * framebuffer.
2613          *
2614          * This should only fail upon a hung GPU, in which case we
2615          * can safely continue.
2616          */
2617         dev_priv->mm.interruptible = false;
2618         ret = i915_gem_object_finish_gpu(obj);
2619         dev_priv->mm.interruptible = was_interruptible;
2620 
2621         return ret;
2622 }
2623 
2624 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2625 {
2626         struct drm_device *dev = crtc->dev;
2627         struct drm_i915_private *dev_priv = dev->dev_private;
2628         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2629         unsigned long flags;
2630         bool pending;
2631 
2632         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2633             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2634                 return false;
2635 
2636         spin_lock_irqsave(&dev->event_lock, flags);
2637         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2638         spin_unlock_irqrestore(&dev->event_lock, flags);
2639 
2640         return pending;
2641 }
2642 
2643 static int
2644 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2645                     struct drm_framebuffer *fb)
2646 {
2647         struct drm_device *dev = crtc->dev;
2648         struct drm_i915_private *dev_priv = dev->dev_private;
2649         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2650         struct drm_framebuffer *old_fb;
2651         int ret;
2652 
2653         if (intel_crtc_has_pending_flip(crtc)) {
2654                 DRM_ERROR("pipe is still busy with an old pageflip\n");
2655                 return -EBUSY;
2656         }
2657 
2658         /* no fb bound */
2659         if (!fb) {
2660                 DRM_ERROR("No FB bound\n");
2661                 return 0;
2662         }
2663 
2664         if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2665                 DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2666                           plane_name(intel_crtc->plane),
2667                           INTEL_INFO(dev)->num_pipes);
2668                 return -EINVAL;
2669         }
2670 
2671         mutex_lock(&dev->struct_mutex);
2672         ret = intel_pin_and_fence_fb_obj(dev,
2673                                          to_intel_framebuffer(fb)->obj,
2674                                          NULL);
2675         mutex_unlock(&dev->struct_mutex);
2676         if (ret != 0) {
2677                 DRM_ERROR("pin & fence failed\n");
2678                 return ret;
2679         }
2680 
2681         /*
2682          * Update pipe size and adjust fitter if needed: the reason for this is
2683          * that in compute_mode_changes we check the native mode (not the pfit
2684          * mode) to see if we can flip rather than do a full mode set. In the
2685          * fastboot case, we'll flip, but if we don't update the pipesrc and
2686          * pfit state, we'll end up with a big fb scanned out into the wrong
2687          * sized surface.
2688          *
2689          * To fix this properly, we need to hoist the checks up into
2690          * compute_mode_changes (or above), check the actual pfit state and
2691          * whether the platform allows pfit disable with pipe active, and only
2692          * then update the pipesrc and pfit state, even on the flip path.
2693          */
2694         if (i915.fastboot) {
2695                 const struct drm_display_mode *adjusted_mode =
2696                         &intel_crtc->config.adjusted_mode;
2697 
2698                 I915_WRITE(PIPESRC(intel_crtc->pipe),
2699                            ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2700                            (adjusted_mode->crtc_vdisplay - 1));
2701                 if (!intel_crtc->config.pch_pfit.enabled &&
2702                     (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2703                      intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2704                         I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2705                         I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2706                         I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2707                 }
2708                 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2709                 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2710         }
2711 
2712         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2713 
2714         old_fb = crtc->primary->fb;
2715         crtc->primary->fb = fb;
2716         crtc->x = x;
2717         crtc->y = y;
2718 
2719         if (old_fb) {
2720                 if (intel_crtc->active && old_fb != fb)
2721                         intel_wait_for_vblank(dev, intel_crtc->pipe);
2722                 mutex_lock(&dev->struct_mutex);
2723                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2724                 mutex_unlock(&dev->struct_mutex);
2725         }
2726 
2727         mutex_lock(&dev->struct_mutex);
2728         intel_update_fbc(dev);
2729         intel_edp_psr_update(dev);
2730         mutex_unlock(&dev->struct_mutex);
2731 
2732         return 0;
2733 }
2734 
2735 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2736 {
2737         struct drm_device *dev = crtc->dev;
2738         struct drm_i915_private *dev_priv = dev->dev_private;
2739         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2740         int pipe = intel_crtc->pipe;
2741         u32 reg, temp;
2742 
2743         /* enable normal train */
2744         reg = FDI_TX_CTL(pipe);
2745         temp = I915_READ(reg);
2746         if (IS_IVYBRIDGE(dev)) {
2747                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2748                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2749         } else {
2750                 temp &= ~FDI_LINK_TRAIN_NONE;
2751                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2752         }
2753         I915_WRITE(reg, temp);
2754 
2755         reg = FDI_RX_CTL(pipe);
2756         temp = I915_READ(reg);
2757         if (HAS_PCH_CPT(dev)) {
2758                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2759                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2760         } else {
2761                 temp &= ~FDI_LINK_TRAIN_NONE;
2762                 temp |= FDI_LINK_TRAIN_NONE;
2763         }
2764         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2765 
2766         /* wait one idle pattern time */
2767         POSTING_READ(reg);
2768         udelay(1000);
2769 
2770         /* IVB wants error correction enabled */
2771         if (IS_IVYBRIDGE(dev))
2772                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2773                            FDI_FE_ERRC_ENABLE);
2774 }
2775 
2776 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2777 {
2778         return crtc->base.enabled && crtc->active &&
2779                 crtc->config.has_pch_encoder;
2780 }
2781 
2782 static void ivb_modeset_global_resources(struct drm_device *dev)
2783 {
2784         struct drm_i915_private *dev_priv = dev->dev_private;
2785         struct intel_crtc *pipe_B_crtc =
2786                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2787         struct intel_crtc *pipe_C_crtc =
2788                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2789         uint32_t temp;
2790 
2791         /*
2792          * When everything is off, disable fdi C so that we can enable fdi B
2793          * with all lanes. Note that we don't care about enabled pipes without
2794          * an enabled pch encoder.
2795          */
2796         if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2797             !pipe_has_enabled_pch(pipe_C_crtc)) {
2798                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2799                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2800 
2801                 temp = I915_READ(SOUTH_CHICKEN1);
2802                 temp &= ~FDI_BC_BIFURCATION_SELECT;
2803                 DRM_DEBUG_KMS("disabling fdi C rx\n");
2804                 I915_WRITE(SOUTH_CHICKEN1, temp);
2805         }
2806 }
2807 
2808 /* The FDI link training functions for ILK/Ibexpeak. */
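/*
 * Sequence, as implemented below: put CPU FDI TX and PCH FDI RX into training
 * pattern 1 and poll FDI_RX_IIR for bit lock, then switch both to pattern 2
 * and poll for symbol lock; intel_fdi_normal_train() later moves the link to
 * the normal (idle) pattern.
 */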
2809 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2810 {
2811         struct drm_device *dev = crtc->dev;
2812         struct drm_i915_private *dev_priv = dev->dev_private;
2813         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2814         int pipe = intel_crtc->pipe;
2815         u32 reg, temp, tries;
2816 
2817         /* FDI needs bits from pipe first */
2818         assert_pipe_enabled(dev_priv, pipe);
2819 
2820         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2821            for train result */
2822         reg = FDI_RX_IMR(pipe);
2823         temp = I915_READ(reg);
2824         temp &= ~FDI_RX_SYMBOL_LOCK;
2825         temp &= ~FDI_RX_BIT_LOCK;
2826         I915_WRITE(reg, temp);
2827         I915_READ(reg);
2828         udelay(150);
2829 
2830         /* enable CPU FDI TX and PCH FDI RX */
2831         reg = FDI_TX_CTL(pipe);
2832         temp = I915_READ(reg);
2833         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2834         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2835         temp &= ~FDI_LINK_TRAIN_NONE;
2836         temp |= FDI_LINK_TRAIN_PATTERN_1;
2837         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2838 
2839         reg = FDI_RX_CTL(pipe);
2840         temp = I915_READ(reg);
2841         temp &= ~FDI_LINK_TRAIN_NONE;
2842         temp |= FDI_LINK_TRAIN_PATTERN_1;
2843         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2844 
2845         POSTING_READ(reg);
2846         udelay(150);
2847 
2848         /* Ironlake workaround, enable clock pointer after FDI enable */
2849         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2850         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2851                    FDI_RX_PHASE_SYNC_POINTER_EN);
2852 
2853         reg = FDI_RX_IIR(pipe);
2854         for (tries = 0; tries < 5; tries++) {
2855                 temp = I915_READ(reg);
2856                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2857 
2858                 if ((temp & FDI_RX_BIT_LOCK)) {
2859                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2860                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2861                         break;
2862                 }
2863         }
2864         if (tries == 5)
2865                 DRM_ERROR("FDI train 1 fail!\n");
2866 
2867         /* Train 2 */
2868         reg = FDI_TX_CTL(pipe);
2869         temp = I915_READ(reg);
2870         temp &= ~FDI_LINK_TRAIN_NONE;
2871         temp |= FDI_LINK_TRAIN_PATTERN_2;
2872         I915_WRITE(reg, temp);
2873 
2874         reg = FDI_RX_CTL(pipe);
2875         temp = I915_READ(reg);
2876         temp &= ~FDI_LINK_TRAIN_NONE;
2877         temp |= FDI_LINK_TRAIN_PATTERN_2;
2878         I915_WRITE(reg, temp);
2879 
2880         POSTING_READ(reg);
2881         udelay(150);
2882 
2883         reg = FDI_RX_IIR(pipe);
2884         for (tries = 0; tries < 5; tries++) {
2885                 temp = I915_READ(reg);
2886                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2887 
2888                 if (temp & FDI_RX_SYMBOL_LOCK) {
2889                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2890                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2891                         break;
2892                 }
2893         }
2894         if (tries == 5)
2895                 DRM_ERROR("FDI train 2 fail!\n");
2896 
2897         DRM_DEBUG_KMS("FDI train done\n");
2898 
2899 }
2900 
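/*
 * Each entry below selects one SNB-B voltage-swing/pre-emphasis combination
 * (400mV/0dB, 400mV/6dB, 600mV/3.5dB, 800mV/0dB); the training loops walk
 * this table until the receiver reports lock.
 */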
2901 static const int snb_b_fdi_train_param[] = {
2902         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2903         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2904         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2905         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2906 };
2907 
2908 /* The FDI link training functions for SNB/Cougarpoint. */
2909 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2910 {
2911         struct drm_device *dev = crtc->dev;
2912         struct drm_i915_private *dev_priv = dev->dev_private;
2913         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2914         int pipe = intel_crtc->pipe;
2915         u32 reg, temp, i, retry;
2916 
2917         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2918            for train result */
2919         reg = FDI_RX_IMR(pipe);
2920         temp = I915_READ(reg);
2921         temp &= ~FDI_RX_SYMBOL_LOCK;
2922         temp &= ~FDI_RX_BIT_LOCK;
2923         I915_WRITE(reg, temp);
2924 
2925         POSTING_READ(reg);
2926         udelay(150);
2927 
2928         /* enable CPU FDI TX and PCH FDI RX */
2929         reg = FDI_TX_CTL(pipe);
2930         temp = I915_READ(reg);
2931         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2932         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2933         temp &= ~FDI_LINK_TRAIN_NONE;
2934         temp |= FDI_LINK_TRAIN_PATTERN_1;
2935         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2936         /* SNB-B */
2937         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2938         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2939 
2940         I915_WRITE(FDI_RX_MISC(pipe),
2941                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2942 
2943         reg = FDI_RX_CTL(pipe);
2944         temp = I915_READ(reg);
2945         if (HAS_PCH_CPT(dev)) {
2946                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2947                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2948         } else {
2949                 temp &= ~FDI_LINK_TRAIN_NONE;
2950                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2951         }
2952         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2953 
2954         POSTING_READ(reg);
2955         udelay(150);
2956 
2957         for (i = 0; i < 4; i++) {
2958                 reg = FDI_TX_CTL(pipe);
2959                 temp = I915_READ(reg);
2960                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2961                 temp |= snb_b_fdi_train_param[i];
2962                 I915_WRITE(reg, temp);
2963 
2964                 POSTING_READ(reg);
2965                 udelay(500);
2966 
2967                 for (retry = 0; retry < 5; retry++) {
2968                         reg = FDI_RX_IIR(pipe);
2969                         temp = I915_READ(reg);
2970                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2971                         if (temp & FDI_RX_BIT_LOCK) {
2972                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2973                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
2974                                 break;
2975                         }
2976                         udelay(50);
2977                 }
2978                 if (retry < 5)
2979                         break;
2980         }
2981         if (i == 4)
2982                 DRM_ERROR("FDI train 1 fail!\n");
2983 
2984         /* Train 2 */
2985         reg = FDI_TX_CTL(pipe);
2986         temp = I915_READ(reg);
2987         temp &= ~FDI_LINK_TRAIN_NONE;
2988         temp |= FDI_LINK_TRAIN_PATTERN_2;
2989         if (IS_GEN6(dev)) {
2990                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2991                 /* SNB-B */
2992                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2993         }
2994         I915_WRITE(reg, temp);
2995 
2996         reg = FDI_RX_CTL(pipe);
2997         temp = I915_READ(reg);
2998         if (HAS_PCH_CPT(dev)) {
2999                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3000                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3001         } else {
3002                 temp &= ~FDI_LINK_TRAIN_NONE;
3003                 temp |= FDI_LINK_TRAIN_PATTERN_2;
3004         }
3005         I915_WRITE(reg, temp);
3006 
3007         POSTING_READ(reg);
3008         udelay(150);
3009 
3010         for (i = 0; i < 4; i++) {
3011                 reg = FDI_TX_CTL(pipe);
3012                 temp = I915_READ(reg);
3013                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3014                 temp |= snb_b_fdi_train_param[i];
3015                 I915_WRITE(reg, temp);
3016 
3017                 POSTING_READ(reg);
3018                 udelay(500);
3019 
3020                 for (retry = 0; retry < 5; retry++) {
3021                         reg = FDI_RX_IIR(pipe);
3022                         temp = I915_READ(reg);
3023                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3024                         if (temp & FDI_RX_SYMBOL_LOCK) {
3025                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3026                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
3027                                 break;
3028                         }
3029                         udelay(50);
3030                 }
3031                 if (retry < 5)
3032                         break;
3033         }
3034         if (i == 4)
3035                 DRM_ERROR("FDI train 2 fail!\n");
3036 
3037         DRM_DEBUG_KMS("FDI train done.\n");
3038 }
3039 
3040 /* Manual link training for Ivy Bridge A0 parts */
3041 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3042 {
3043         struct drm_device *dev = crtc->dev;
3044         struct drm_i915_private *dev_priv = dev->dev_private;
3045         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3046         int pipe = intel_crtc->pipe;
3047         u32 reg, temp, i, j;
3048 
3049         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3050            for train result */
3051         reg = FDI_RX_IMR(pipe);
3052         temp = I915_READ(reg);
3053         temp &= ~FDI_RX_SYMBOL_LOCK;
3054         temp &= ~FDI_RX_BIT_LOCK;
3055         I915_WRITE(reg, temp);
3056 
3057         POSTING_READ(reg);
3058         udelay(150);
3059 
3060         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3061                       I915_READ(FDI_RX_IIR(pipe)));
3062 
3063         /* Try each vswing and preemphasis setting twice before moving on */
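        /* (j / 2 indexes snb_b_fdi_train_param below, so each of the four
         * vswing/pre-emphasis settings gets two attempts.) */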
3064         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3065                 /* disable first in case we need to retry */
3066                 reg = FDI_TX_CTL(pipe);
3067                 temp = I915_READ(reg);
3068                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3069                 temp &= ~FDI_TX_ENABLE;
3070                 I915_WRITE(reg, temp);
3071 
3072                 reg = FDI_RX_CTL(pipe);
3073                 temp = I915_READ(reg);
3074                 temp &= ~FDI_LINK_TRAIN_AUTO;
3075                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3076                 temp &= ~FDI_RX_ENABLE;
3077                 I915_WRITE(reg, temp);
3078 
3079                 /* enable CPU FDI TX and PCH FDI RX */
3080                 reg = FDI_TX_CTL(pipe);
3081                 temp = I915_READ(reg);
3082                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3083                 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3084                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3085                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3086                 temp |= snb_b_fdi_train_param[j/2];
3087                 temp |= FDI_COMPOSITE_SYNC;
3088                 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3089 
3090                 I915_WRITE(FDI_RX_MISC(pipe),
3091                            FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3092 
3093                 reg = FDI_RX_CTL(pipe);
3094                 temp = I915_READ(reg);
3095                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3096                 temp |= FDI_COMPOSITE_SYNC;
3097                 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3098 
3099                 POSTING_READ(reg);
3100                 udelay(1); /* should be 0.5us */
3101 
3102                 for (i = 0; i < 4; i++) {
3103                         reg = FDI_RX_IIR(pipe);
3104                         temp = I915_READ(reg);
3105                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3106 
3107                         if (temp & FDI_RX_BIT_LOCK ||
3108                             (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3109                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3110                                 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3111                                               i);
3112                                 break;
3113                         }
3114                         udelay(1); /* should be 0.5us */
3115                 }
3116                 if (i == 4) {
3117                         DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3118                         continue;
3119                 }
3120 
3121                 /* Train 2 */
3122                 reg = FDI_TX_CTL(pipe);
3123                 temp = I915_READ(reg);
3124                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3125                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3126                 I915_WRITE(reg, temp);
3127 
3128                 reg = FDI_RX_CTL(pipe);
3129                 temp = I915_READ(reg);
3130                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3131                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3132                 I915_WRITE(reg, temp);
3133 
3134                 POSTING_READ(reg);
3135                 udelay(2); /* should be 1.5us */
3136 
3137                 for (i = 0; i < 4; i++) {
3138                         reg = FDI_RX_IIR(pipe);
3139                         temp = I915_READ(reg);
3140                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3141 
3142                         if (temp & FDI_RX_SYMBOL_LOCK ||
3143                             (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3144                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3145                                 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3146                                               i);
3147                                 goto train_done;
3148                         }
3149                         udelay(2); /* should be 1.5us */
3150                 }
3151                 if (i == 4)
3152                         DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3153         }
3154 
3155 train_done:
3156         DRM_DEBUG_KMS("FDI train done.\n");
3157 }
3158 
3159 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3160 {
3161         struct drm_device *dev = intel_crtc->base.dev;
3162         struct drm_i915_private *dev_priv = dev->dev_private;
3163         int pipe = intel_crtc->pipe;
3164         u32 reg, temp;
3165 
3166 
3167         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3168         reg = FDI_RX_CTL(pipe);
3169         temp = I915_READ(reg);
3170         temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3171         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
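        /* PIPECONF_BPC_MASK covers bits 7:5; the shift by 11 copies the same
         * value into the FDI RX BPC field at bits 18:16, cleared above. */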
3172         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3173         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3174 
3175         POSTING_READ(reg);
3176         udelay(200);
3177 
3178         /* Switch from Rawclk to PCDclk */
3179         temp = I915_READ(reg);
3180         I915_WRITE(reg, temp | FDI_PCDCLK);
3181 
3182         POSTING_READ(reg);
3183         udelay(200);
3184 
3185         /* Enable CPU FDI TX PLL, always on for Ironlake */
3186         reg = FDI_TX_CTL(pipe);
3187         temp = I915_READ(reg);
3188         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3189                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3190 
3191                 POSTING_READ(reg);
3192                 udelay(100);
3193         }
3194 }
3195 
3196 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3197 {
3198         struct drm_device *dev = intel_crtc->base.dev;
3199         struct drm_i915_private *dev_priv = dev->dev_private;
3200         int pipe = intel_crtc->pipe;
3201         u32 reg, temp;
3202 
3203         /* Switch from PCDclk to Rawclk */
3204         reg = FDI_RX_CTL(pipe);
3205         temp = I915_READ(reg);
3206         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3207 
3208         /* Disable CPU FDI TX PLL */
3209         reg = FDI_TX_CTL(pipe);
3210         temp = I915_READ(reg);
3211         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3212 
3213         POSTING_READ(reg);
3214         udelay(100);
3215 
3216         reg = FDI_RX_CTL(pipe);
3217         temp = I915_READ(reg);
3218         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3219 
3220         /* Wait for the clocks to turn off. */
3221         POSTING_READ(reg);
3222         udelay(100);
3223 }
3224 
3225 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3226 {
3227         struct drm_device *dev = crtc->dev;
3228         struct drm_i915_private *dev_priv = dev->dev_private;
3229         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3230         int pipe = intel_crtc->pipe;
3231         u32 reg, temp;
3232 
3233         /* disable CPU FDI tx and PCH FDI rx */
3234         reg = FDI_TX_CTL(pipe);
3235         temp = I915_READ(reg);
3236         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3237         POSTING_READ(reg);
3238 
3239         reg = FDI_RX_CTL(pipe);
3240         temp = I915_READ(reg);
3241         temp &= ~(0x7 << 16);
3242         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3243         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3244 
3245         POSTING_READ(reg);
3246         udelay(100);
3247 
3248         /* Ironlake workaround, disable clock pointer after disabling FDI */
3249         if (HAS_PCH_IBX(dev))
3250                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3251 
3252         /* still set train pattern 1 */
3253         reg = FDI_TX_CTL(pipe);
3254         temp = I915_READ(reg);
3255         temp &= ~FDI_LINK_TRAIN_NONE;
3256         temp |= FDI_LINK_TRAIN_PATTERN_1;
3257         I915_WRITE(reg, temp);
3258 
3259         reg = FDI_RX_CTL(pipe);
3260         temp = I915_READ(reg);
3261         if (HAS_PCH_CPT(dev)) {
3262                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3263                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3264         } else {
3265                 temp &= ~FDI_LINK_TRAIN_NONE;
3266                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3267         }
3268         /* BPC in FDI rx is consistent with that in PIPECONF */
3269         temp &= ~(0x07 << 16);
3270         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3271         I915_WRITE(reg, temp);
3272 
3273         POSTING_READ(reg);
3274         udelay(100);
3275 }
3276 
3277 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3278 {
3279         struct intel_crtc *crtc;
3280 
3281         /* Note that we don't need to be called with mode_config.lock here
3282          * as our list of CRTC objects is static for the lifetime of the
3283          * device and so cannot disappear as we iterate. Similarly, we can
3284          * happily treat the predicates as racy, atomic checks as userspace
3285          * cannot claim and pin a new fb without at least acquiring the
3286          * struct_mutex and so serialising with us.
3287          */
3288         for_each_intel_crtc(dev, crtc) {
3289                 if (atomic_read(&crtc->unpin_work_count) == 0)
3290                         continue;
3291 
3292                 if (crtc->unpin_work)
3293                         intel_wait_for_vblank(dev, crtc->pipe);
3294 
3295                 return true;
3296         }
3297 
3298         return false;
3299 }
3300 
3301 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3302 {
3303         struct drm_device *dev = crtc->dev;
3304         struct drm_i915_private *dev_priv = dev->dev_private;
3305 
3306         if (crtc->primary->fb == NULL)
3307                 return;
3308 
3309         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3310 
3311         WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3312                                    !intel_crtc_has_pending_flip(crtc),
3313                                    60*HZ) == 0);
3314 
3315         mutex_lock(&dev->struct_mutex);
3316         intel_finish_fb(crtc->primary->fb);
3317         mutex_unlock(&dev->struct_mutex);
3318 }
3319 
3320 /* Program iCLKIP clock to the desired frequency */
3321 static void lpt_program_iclkip(struct drm_crtc *crtc)
3322 {
3323         struct drm_device *dev = crtc->dev;
3324         struct drm_i915_private *dev_priv = dev->dev_private;
3325         int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3326         u32 divsel, phaseinc, auxdiv, phasedir = 0;
3327         u32 temp;
3328 
3329         mutex_lock(&dev_priv->dpio_lock);
3330 
3331         /* It is necessary to ungate the pixclk gate prior to programming
3332          * the divisors, and gate it back when it is done.
3333          */
3334         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3335 
3336         /* Disable SSCCTL */
3337         intel_sbi_write(dev_priv, SBI_SSCCTL6,
3338                         intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3339                                 SBI_SSCCTL_DISABLE,
3340                         SBI_ICLK);
3341 
3342         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3343         if (clock == 20000) {
3344                 auxdiv = 1;
3345                 divsel = 0x41;
3346                 phaseinc = 0x20;
3347         } else {
3348                 /* The iCLK virtual clock root frequency is in MHz,
3349                  * but the adjusted_mode->crtc_clock is in KHz. To get the
3350                  * divisors, it is necessary to divide one by another, so we
3351                  * convert the virtual clock precision to KHz here for higher
3352                  * precision.
3353                  */
3354                 u32 iclk_virtual_root_freq = 172800 * 1000;
3355                 u32 iclk_pi_range = 64;
3356                 u32 desired_divisor, msb_divisor_value, pi_value;
3357 
3358                 desired_divisor = (iclk_virtual_root_freq / clock);
3359                 msb_divisor_value = desired_divisor / iclk_pi_range;
3360                 pi_value = desired_divisor % iclk_pi_range;
3361 
3362                 auxdiv = 0;
3363                 divsel = msb_divisor_value - 2;
3364                 phaseinc = pi_value;
3365         }
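        /*
         * Worked example (illustrative): for a 108000 KHz pixel clock,
         * desired_divisor = 172800000 / 108000 = 1600, so
         * msb_divisor_value = 1600 / 64 = 25 and pi_value = 1600 % 64 = 0,
         * giving divsel = 23, phaseinc = 0, auxdiv = 0.
         */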
3366 
3367         /* This should not happen with any sane values */
3368         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3369                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3370         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3371                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3372 
3373         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3374                         clock,
3375                         auxdiv,
3376                         divsel,
3377                         phasedir,
3378                         phaseinc);
3379 
3380         /* Program SSCDIVINTPHASE6 */
3381         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3382         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3383         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3384         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3385         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3386         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3387         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3388         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3389 
3390         /* Program SSCAUXDIV */
3391         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3392         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3393         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3394         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3395 
3396         /* Enable modulator and associated divider */
3397         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3398         temp &= ~SBI_SSCCTL_DISABLE;
3399         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3400 
3401         /* Wait for initialization time */
3402         udelay(24);
3403 
3404         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3405 
3406         mutex_unlock(&dev_priv->dpio_lock);
3407 }
3408 
3409 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3410                                                 enum pipe pch_transcoder)
3411 {
3412         struct drm_device *dev = crtc->base.dev;
3413         struct drm_i915_private *dev_priv = dev->dev_private;
3414         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3415 
3416         I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3417                    I915_READ(HTOTAL(cpu_transcoder)));
3418         I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3419                    I915_READ(HBLANK(cpu_transcoder)));
3420         I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3421                    I915_READ(HSYNC(cpu_transcoder)));
3422 
3423         I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3424                    I915_READ(VTOTAL(cpu_transcoder)));
3425         I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3426                    I915_READ(VBLANK(cpu_transcoder)));
3427         I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3428                    I915_READ(VSYNC(cpu_transcoder)));
3429         I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3430                    I915_READ(VSYNCSHIFT(cpu_transcoder)));
3431 }
3432 
3433 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3434 {
3435         struct drm_i915_private *dev_priv = dev->dev_private;
3436         uint32_t temp;
3437 
3438         temp = I915_READ(SOUTH_CHICKEN1);
3439         if (temp & FDI_BC_BIFURCATION_SELECT)
3440                 return;
3441 
3442         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3443         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3444 
3445         temp |= FDI_BC_BIFURCATION_SELECT;
3446         DRM_DEBUG_KMS("enabling fdi C rx\n");
3447         I915_WRITE(SOUTH_CHICKEN1, temp);
3448         POSTING_READ(SOUTH_CHICKEN1);
3449 }
3450 
3451 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3452 {
3453         struct drm_device *dev = intel_crtc->base.dev;
3454         struct drm_i915_private *dev_priv = dev->dev_private;
3455 
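        /*
         * With FDI_BC_BIFURCATION_SELECT set, two of the four FDI B lanes are
         * routed to FDI C; pipe B can therefore only keep more than two lanes
         * while bifurcation stays disabled.
         */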
3456         switch (intel_crtc->pipe) {
3457         case PIPE_A:
3458                 break;
3459         case PIPE_B:
3460                 if (intel_crtc->config.fdi_lanes > 2)
3461                         WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3462                 else
3463                         cpt_enable_fdi_bc_bifurcation(dev);
3464 
3465                 break;
3466         case PIPE_C:
3467                 cpt_enable_fdi_bc_bifurcation(dev);
3468 
3469                 break;
3470         default:
3471                 BUG();
3472         }
3473 }
3474 
3475 /*
3476  * Enable PCH resources required for PCH ports:
3477  *   - PCH PLLs
3478  *   - FDI training & RX/TX
3479  *   - update transcoder timings
3480  *   - DP transcoding bits
3481  *   - transcoder
3482  */
3483 static void ironlake_pch_enable(struct drm_crtc *crtc)
3484 {
3485         struct drm_device *dev = crtc->dev;
3486         struct drm_i915_private *dev_priv = dev->dev_private;
3487         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3488         int pipe = intel_crtc->pipe;
3489         u32 reg, temp;
3490 
3491         assert_pch_transcoder_disabled(dev_priv, pipe);
3492 
3493         if (IS_IVYBRIDGE(dev))
3494                 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3495 
3496         /* Write the TU size bits before fdi link training, so that error
3497          * detection works. */
3498         I915_WRITE(FDI_RX_TUSIZE1(pipe),
3499                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3500 
3501         /* For PCH output, training FDI link */
3502         dev_priv->display.fdi_link_train(crtc);
3503 
3504         /* We need to program the right clock selection before writing the pixel
3505          * multiplier into the DPLL. */
3506         if (HAS_PCH_CPT(dev)) {
3507                 u32 sel;
3508 
3509                 temp = I915_READ(PCH_DPLL_SEL);
3510                 temp |= TRANS_DPLL_ENABLE(pipe);
3511                 sel = TRANS_DPLLB_SEL(pipe);
3512                 if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3513                         temp |= sel;
3514                 else
3515                         temp &= ~sel;
3516                 I915_WRITE(PCH_DPLL_SEL, temp);
3517         }
3518 
3519         /* XXX: pch pll's can be enabled any time before we enable the PCH
3520          * transcoder, and we actually should do this to not upset any PCH
3521          * transcoder that already uses the clock when we share it.
3522          *
3523          * Note that enable_shared_dpll tries to do the right thing, but
3524          * get_shared_dpll unconditionally resets the pll - we need that to have
3525          * the right LVDS enable sequence. */
3526         intel_enable_shared_dpll(intel_crtc);
3527 
3528         /* set transcoder timing, panel must allow it */
3529         assert_panel_unlocked(dev_priv, pipe);
3530         ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3531 
3532         intel_fdi_normal_train(crtc);
3533 
3534         /* For PCH DP, enable TRANS_DP_CTL */
3535         if (HAS_PCH_CPT(dev) &&
3536             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3537              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3538                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3539                 reg = TRANS_DP_CTL(pipe);
3540                 temp = I915_READ(reg);
3541                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3542                           TRANS_DP_SYNC_MASK |
3543                           TRANS_DP_BPC_MASK);
3544                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3545                          TRANS_DP_ENH_FRAMING);
3546                 temp |= bpc << 9; /* same format but at 11:9 */
3547 
3548                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3549                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3550                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3551                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3552 
3553                 switch (intel_trans_dp_port_sel(crtc)) {
3554                 case PCH_DP_B:
3555                         temp |= TRANS_DP_PORT_SEL_B;
3556                         break;
3557                 case PCH_DP_C:
3558                         temp |= TRANS_DP_PORT_SEL_C;
3559                         break;
3560                 case PCH_DP_D:
3561                         temp |= TRANS_DP_PORT_SEL_D;
3562                         break;
3563                 default:
3564                         BUG();
3565                 }
3566 
3567                 I915_WRITE(reg, temp);
3568         }
3569 
3570         ironlake_enable_pch_transcoder(dev_priv, pipe);
3571 }
3572 
3573 static void lpt_pch_enable(struct drm_crtc *crtc)
3574 {
3575         struct drm_device *dev = crtc->dev;
3576         struct drm_i915_private *dev_priv = dev->dev_private;
3577         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3578         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3579 
3580         assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3581 
3582         lpt_program_iclkip(crtc);
3583 
3584         /* Set transcoder timing. */
3585         ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3586 
3587         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3588 }
3589 
3590 static void intel_put_shared_dpll(struct intel_crtc *crtc)
3591 {
3592         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3593 
3594         if (pll == NULL)
3595                 return;
3596 
3597         if (pll->refcount == 0) {
3598                 WARN(1, "bad %s refcount\n", pll->name);
3599                 return;
3600         }
3601 
3602         if (--pll->refcount == 0) {
3603                 WARN_ON(pll->on);
3604                 WARN_ON(pll->active);
3605         }
3606 
3607         crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3608 }
3609 
3610 static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3611 {
3612         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3613         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3614         enum intel_dpll_id i;
3615 
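        /*
         * Selection order: drop any PLL this CRTC already holds, use the
         * fixed per-pipe PLL on IBX, otherwise share a PLL whose programmed
         * state matches, and finally fall back to any unused PLL.
         */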
3616         if (pll) {
3617                 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3618                               crtc->base.base.id, pll->name);
3619                 intel_put_shared_dpll(crtc);
3620         }
3621 
3622         if (HAS_PCH_IBX(dev_priv->dev)) {
3623                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3624                 i = (enum intel_dpll_id) crtc->pipe;
3625                 pll = &dev_priv->shared_dplls[i];
3626 
3627                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3628                               crtc->base.base.id, pll->name);
3629 
3630                 WARN_ON(pll->refcount);
3631 
3632                 goto found;
3633         }
3634 
3635         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3636                 pll = &dev_priv->shared_dplls[i];
3637 
3638                 /* Only check PLLs that are already in use first, to find one we can share */
3639                 if (pll->refcount == 0)
3640                         continue;
3641 
3642                 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3643                            sizeof(pll->hw_state)) == 0) {
3644                         DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3645                                       crtc->base.base.id,
3646                                       pll->name, pll->refcount, pll->active);
3647 
3648                         goto found;
3649                 }
3650         }
3651 
3652         /* Ok no matching timings, maybe there's a free one? */
3653         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3654                 pll = &dev_priv->shared_dplls[i];
3655                 if (pll->refcount == 0) {
3656                         DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3657                                       crtc->base.base.id, pll->name);
3658                         goto found;
3659                 }
3660         }
3661 
3662         return NULL;
3663 
3664 found:
3665         if (pll->refcount == 0)
3666                 pll->hw_state = crtc->config.dpll_hw_state;
3667 
3668         crtc->config.shared_dpll = i;
3669         DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3670                          pipe_name(crtc->pipe));
3671 
3672         pll->refcount++;
3673 
3674         return pll;
3675 }
3676 
3677 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3678 {
3679         struct drm_i915_private *dev_priv = dev->dev_private;
3680         int dslreg = PIPEDSL(pipe);
3681         u32 temp;
3682 
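        /* PIPEDSL reports the pipe's current scanline; if it never moves away
         * from the sampled value, the pipe is not actually running. */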
3683         temp = I915_READ(dslreg);
3684         udelay(500);
3685         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3686                 if (wait_for(I915_READ(dslreg) != temp, 5))
3687                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3688         }
3689 }
3690 
3691 static void ironlake_pfit_enable(struct intel_crtc *crtc)
3692 {
3693         struct drm_device *dev = crtc->base.dev;
3694         struct drm_i915_private *dev_priv = dev->dev_private;
3695         int pipe = crtc->pipe;
3696 
3697         if (crtc->config.pch_pfit.enabled) {
3698                 /* Force use of hard-coded filter coefficients
3699                  * as some pre-programmed values are broken,
3700                  * e.g. x201.
3701                  */
3702                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3703                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3704                                                  PF_PIPE_SEL_IVB(pipe));
3705                 else
3706                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3707                 I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3708                 I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3709         }
3710 }
3711 
3712 static void intel_enable_planes(struct drm_crtc *crtc)
3713 {
3714         struct drm_device *dev = crtc->dev;
3715         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3716         struct drm_plane *plane;
3717         struct intel_plane *intel_plane;
3718 
3719         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3720                 intel_plane = to_intel_plane(plane);
3721                 if (intel_plane->pipe == pipe)
3722                         intel_plane_restore(&intel_plane->base);
3723         }
3724 }
3725 
3726 static void intel_disable_planes(struct drm_crtc *crtc)
3727 {
3728         struct drm_device *dev = crtc->dev;
3729         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3730         struct drm_plane *plane;
3731         struct intel_plane *intel_plane;
3732 
3733         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3734                 intel_plane = to_intel_plane(plane);
3735                 if (intel_plane->pipe == pipe)
3736                         intel_plane_disable(&intel_plane->base);
3737         }
3738 }
3739 
3740 void hsw_enable_ips(struct intel_crtc *crtc)
3741 {
3742         struct drm_device *dev = crtc->base.dev;
3743         struct drm_i915_private *dev_priv = dev->dev_private;
3744 
3745         if (!crtc->config.ips_enabled)
3746                 return;
3747 
3748         /* We can only enable IPS after we enable a plane and wait for a vblank */
3749         intel_wait_for_vblank(dev, crtc->pipe);
3750 
3751         assert_plane_enabled(dev_priv, crtc->plane);
3752         if (IS_BROADWELL(dev)) {
3753                 mutex_lock(&dev_priv->rps.hw_lock);
3754                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3755                 mutex_unlock(&dev_priv->rps.hw_lock);
3756                 /* Quoting Art Runyan: "it's not safe to expect any particular
3757                  * value in IPS_CTL bit 31 after enabling IPS through the
3758                  * mailbox." Moreover, the mailbox may return a bogus state,
3759                  * so we need to just enable it and continue on.
3760                  */
3761         } else {
3762                 I915_WRITE(IPS_CTL, IPS_ENABLE);
3763                 /* The bit only becomes 1 in the next vblank, so this wait here
3764                  * is essentially intel_wait_for_vblank. If we don't have this
3765                  * and don't wait for vblanks until the end of crtc_enable, then
3766                  * the HW state readout code will complain that the expected
3767                  * IPS_CTL value is not the one we read. */
3768                 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3769                         DRM_ERROR("Timed out waiting for IPS enable\n");
3770         }
3771 }
3772 
3773 void hsw_disable_ips(struct intel_crtc *crtc)
3774 {
3775         struct drm_device *dev = crtc->base.dev;
3776         struct drm_i915_private *dev_priv = dev->dev_private;
3777 
3778         if (!crtc->config.ips_enabled)
3779                 return;
3780 
3781         assert_plane_enabled(dev_priv, crtc->plane);
3782         if (IS_BROADWELL(dev)) {
3783                 mutex_lock(&dev_priv->rps.hw_lock);
3784                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3785                 mutex_unlock(&dev_priv->rps.hw_lock);
3786                 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
3787                 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
3788                         DRM_ERROR("Timed out waiting for IPS disable\n");
3789         } else {
3790                 I915_WRITE(IPS_CTL, 0);
3791                 POSTING_READ(IPS_CTL);
3792         }
3793 
3794         /* We need to wait for a vblank before we can disable the plane. */
3795         intel_wait_for_vblank(dev, crtc->pipe);
3796 }
3797 
3798 /** Loads the palette/gamma unit for the CRTC with the prepared values */
3799 static void intel_crtc_load_lut(struct drm_crtc *crtc)
3800 {
3801         struct drm_device *dev = crtc->dev;
3802         struct drm_i915_private *dev_priv = dev->dev_private;
3803         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3804         enum pipe pipe = intel_crtc->pipe;
3805         int palreg = PALETTE(pipe);
3806         int i;
3807         bool reenable_ips = false;
3808 
3809         /* The clocks have to be on to load the palette. */
3810         if (!crtc->enabled || !intel_crtc->active)
3811                 return;
3812 
3813         if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3814                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3815                         assert_dsi_pll_enabled(dev_priv);
3816                 else
3817                         assert_pll_enabled(dev_priv, pipe);
3818         }
3819 
3820         /* use legacy palette for Ironlake */
3821         if (HAS_PCH_SPLIT(dev))
3822                 palreg = LGC_PALETTE(pipe);
3823 
3824         /* Workaround: Do not read or write the pipe palette/gamma data while
3825          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3826          */
3827         if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
3828             ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3829              GAMMA_MODE_MODE_SPLIT)) {
3830                 hsw_disable_ips(intel_crtc);
3831                 reenable_ips = true;
3832         }
3833 
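        /* Each palette entry packs 8-bit R/G/B as 0x00RRGGBB. */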
3834         for (i = 0; i < 256; i++) {
3835                 I915_WRITE(palreg + 4 * i,
3836                            (intel_crtc->lut_r[i] << 16) |
3837                            (intel_crtc->lut_g[i] << 8) |
3838                            intel_crtc->lut_b[i]);
3839         }
3840 
3841         if (reenable_ips)
3842                 hsw_enable_ips(intel_crtc);
3843 }
3844 
3845 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3846 {
3847         if (!enable && intel_crtc->overlay) {
3848                 struct drm_device *dev = intel_crtc->base.dev;
3849                 struct drm_i915_private *dev_priv = dev->dev_private;
3850 
3851                 mutex_lock(&dev->struct_mutex);
3852                 dev_priv->mm.interruptible = false;
3853                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3854                 dev_priv->mm.interruptible = true;
3855                 mutex_unlock(&dev->struct_mutex);
3856         }
3857 
3858         /* Let userspace switch the overlay on again. In most cases userspace
3859          * has to recompute where to put it anyway.
3860          */
3861 }
3862 
3863 /**
3864  * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
3865  * cursor plane briefly if not already running after enabling the display
3866  * plane.
3867  * This workaround avoids occasional blank screens when self refresh is
3868  * enabled.
3869  */
3870 static void
3871 g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3872 {
3873         u32 cntl = I915_READ(CURCNTR(pipe));
3874 
3875         if ((cntl & CURSOR_MODE) == 0) {
3876                 u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3877 
3878                 I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3879                 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3880                 intel_wait_for_vblank(dev_priv->dev, pipe);
3881                 I915_WRITE(CURCNTR(pipe), cntl);
3882                 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3883                 I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3884         }
3885 }
3886 
3887 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
3888 {
3889         struct drm_device *dev = crtc->dev;
3890         struct drm_i915_private *dev_priv = dev->dev_private;
3891         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3892         int pipe = intel_crtc->pipe;
3893         int plane = intel_crtc->plane;
3894 
3895         intel_enable_primary_hw_plane(dev_priv, plane, pipe);
3896         intel_enable_planes(crtc);
3897         /* The fixup needs to happen before cursor is enabled */
3898         if (IS_G4X(dev))
3899                 g4x_fixup_plane(dev_priv, pipe);
3900         intel_crtc_update_cursor(crtc, true);
3901         intel_crtc_dpms_overlay(intel_crtc, true);
3902 
3903         hsw_enable_ips(intel_crtc);
3904 
3905         mutex_lock(&dev->struct_mutex);
3906         intel_update_fbc(dev);
3907         intel_edp_psr_update(dev);
3908         mutex_unlock(&dev->struct_mutex);
3909 }
3910 
3911 static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3912 {
3913         struct drm_device *dev = crtc->dev;
3914         struct drm_i915_private *dev_priv = dev->dev_private;
3915         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3916         int pipe = intel_crtc->pipe;
3917         int plane = intel_crtc->plane;
3918 
3919         intel_crtc_wait_for_pending_flips(crtc);
3920         drm_crtc_vblank_off(crtc);
3921 
3922         if (dev_priv->fbc.plane == plane)
3923                 intel_disable_fbc(dev);
3924 
3925         hsw_disable_ips(intel_crtc);
3926 
3927         intel_crtc_dpms_overlay(intel_crtc, false);
3928         intel_crtc_update_cursor(crtc, false);
3929         intel_disable_planes(crtc);
3930         intel_disable_primary_hw_plane(dev_priv, plane, pipe);
3931 }
3932 
3933 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3934 {
3935         struct drm_device *dev = crtc->dev;
3936         struct drm_i915_private *dev_priv = dev->dev_private;
3937         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3938         struct intel_encoder *encoder;
3939         int pipe = intel_crtc->pipe;
3940         enum plane plane = intel_crtc->plane;
3941 
3942         WARN_ON(!crtc->enabled);
3943 
3944         if (intel_crtc->active)
3945                 return;
3946 
3947         if (intel_crtc->config.has_pch_encoder)
3948                 intel_prepare_shared_dpll(intel_crtc);
3949 
3950         if (intel_crtc->config.has_dp_encoder)
3951                 intel_dp_set_m_n(intel_crtc);
3952 
3953         intel_set_pipe_timings(intel_crtc);
3954 
3955         if (intel_crtc->config.has_pch_encoder) {
3956                 intel_cpu_transcoder_set_m_n(intel_crtc,
3957                                              &intel_crtc->config.fdi_m_n);
3958         }
3959 
3960         ironlake_set_pipeconf(crtc);
3961 
3962         /* Set up the display plane register */
3963         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
3964         POSTING_READ(DSPCNTR(plane));
3965 
3966         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
3967                                                crtc->x, crtc->y);
3968 
3969         intel_crtc->active = true;
3970 
3971         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3972         intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3973 
3974         for_each_encoder_on_crtc(dev, crtc, encoder)
3975                 if (encoder->pre_enable)
3976                         encoder->pre_enable(encoder);
3977 
3978         if (intel_crtc->config.has_pch_encoder) {
3979                 /* Note: FDI PLL enabling _must_ be done before we enable the
3980                  * cpu pipes, hence this is separate from all the other fdi/pch
3981                  * enabling. */
3982                 ironlake_fdi_pll_enable(intel_crtc);
3983         } else {
3984                 assert_fdi_tx_disabled(dev_priv, pipe);
3985                 assert_fdi_rx_disabled(dev_priv, pipe);
3986         }
3987 
3988         ironlake_pfit_enable(intel_crtc);
3989 
3990         /*
3991          * On ILK+ LUT must be loaded before the pipe is running but with
3992          * clocks enabled
3993          */
3994         intel_crtc_load_lut(crtc);
3995 
3996         intel_update_watermarks(crtc);
3997         intel_enable_pipe(intel_crtc);
3998 
3999         if (intel_crtc->config.has_pch_encoder)
4000                 ironlake_pch_enable(crtc);
4001 
4002         for_each_encoder_on_crtc(dev, crtc, encoder)
4003                 encoder->enable(encoder);
4004 
4005         if (HAS_PCH_CPT(dev))
4006                 cpt_verify_modeset(dev, intel_crtc->pipe);
4007 
4008         intel_crtc_enable_planes(crtc);
4009 
4010         drm_crtc_vblank_on(crtc);
4011 }
4012 
4013 /* IPS only exists on ULT machines and is tied to pipe A. */
4014 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4015 {
4016         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4017 }
4018 
4019 /*
4020  * This implements the workaround described in the "notes" section of the mode
4021  * set sequence documentation. When going from no pipes or single pipe to
4022  * multiple pipes, and planes are enabled after the pipe, we need to wait at
4023  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4024  */
4025 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4026 {
4027         struct drm_device *dev = crtc->base.dev;
4028         struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4029 
4030         /* We want to get the other_active_crtc only if there's only 1 other
4031          * active crtc. */
4032         for_each_intel_crtc(dev, crtc_it) {
4033                 if (!crtc_it->active || crtc_it == crtc)
4034                         continue;
4035 
4036                 if (other_active_crtc)
4037                         return;
4038 
4039                 other_active_crtc = crtc_it;
4040         }
4041         if (!other_active_crtc)
4042                 return;
4043 
4044         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4045         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4046 }
4047 
4048 static void haswell_crtc_enable(struct drm_crtc *crtc)
4049 {
4050         struct drm_device *dev = crtc->dev;
4051         struct drm_i915_private *dev_priv = dev->dev_private;
4052         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4053         struct intel_encoder *encoder;
4054         int pipe = intel_crtc->pipe;
4055         enum plane plane = intel_crtc->plane;
4056 
4057         WARN_ON(!crtc->enabled);
4058 
4059         if (intel_crtc->active)
4060                 return;
4061 
4062         if (intel_crtc->config.has_dp_encoder)
4063                 intel_dp_set_m_n(intel_crtc);
4064 
4065         intel_set_pipe_timings(intel_crtc);
4066 
4067         if (intel_crtc->config.has_pch_encoder) {
4068                 intel_cpu_transcoder_set_m_n(intel_crtc,
4069                                              &intel_crtc->config.fdi_m_n);
4070         }
4071 
4072         haswell_set_pipeconf(crtc);
4073 
4074         intel_set_pipe_csc(crtc);
4075 
4076         /* Set up the display plane register */
4077         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
4078         POSTING_READ(DSPCNTR(plane));
4079 
4080         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4081                                                crtc->x, crtc->y);
4082 
4083         intel_crtc->active = true;
4084 
4085         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4086         if (intel_crtc->config.has_pch_encoder)
4087                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4088 
4089         if (intel_crtc->config.has_pch_encoder)
4090                 dev_priv->display.fdi_link_train(crtc);
4091 
4092         for_each_encoder_on_crtc(dev, crtc, encoder)
4093                 if (encoder->pre_enable)
4094                         encoder->pre_enable(encoder);
4095 
4096         intel_ddi_enable_pipe_clock(intel_crtc);
4097 
4098         ironlake_pfit_enable(intel_crtc);
4099 
4100         /*
4101          * On ILK+ LUT must be loaded before the pipe is running but with
4102          * clocks enabled
4103          */
4104         intel_crtc_load_lut(crtc);
4105 
4106         intel_ddi_set_pipe_settings(crtc);
4107         intel_ddi_enable_transcoder_func(crtc);
4108 
4109         intel_update_watermarks(crtc);
4110         intel_enable_pipe(intel_crtc);
4111 
4112         if (intel_crtc->config.has_pch_encoder)
4113                 lpt_pch_enable(crtc);
4114 
4115         for_each_encoder_on_crtc(dev, crtc, encoder) {
4116                 encoder->enable(encoder);
4117                 intel_opregion_notify_encoder(encoder, true);
4118         }
4119 
4120         /* If we change the relative order between pipe/planes enabling, we need
4121          * to change the workaround. */
4122         haswell_mode_set_planes_workaround(intel_crtc);
4123         intel_crtc_enable_planes(crtc);
4124 
4125         drm_crtc_vblank_on(crtc);
4126 }
4127 
4128 static void ironlake_pfit_disable(struct intel_crtc *crtc)
4129 {
4130         struct drm_device *dev = crtc->base.dev;
4131         struct drm_i915_private *dev_priv = dev->dev_private;
4132         int pipe = crtc->pipe;
4133 
4134         /* To avoid upsetting the power well on haswell, only disable the pfit if
4135          * it's in use. The hw state code will make sure we get this right. */
4136         if (crtc->config.pch_pfit.enabled) {
4137                 I915_WRITE(PF_CTL(pipe), 0);
4138                 I915_WRITE(PF_WIN_POS(pipe), 0);
4139                 I915_WRITE(PF_WIN_SZ(pipe), 0);
4140         }
4141 }
4142 
4143 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4144 {
4145         struct drm_device *dev = crtc->dev;
4146         struct drm_i915_private *dev_priv = dev->dev_private;
4147         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4148         struct intel_encoder *encoder;
4149         int pipe = intel_crtc->pipe;
4150         u32 reg, temp;
4151 
4152         if (!intel_crtc->active)
4153                 return;
4154 
4155         intel_crtc_disable_planes(crtc);
4156 
4157         for_each_encoder_on_crtc(dev, crtc, encoder)
4158                 encoder->disable(encoder);
4159 
4160         if (intel_crtc->config.has_pch_encoder)
4161                 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
4162 
4163         intel_disable_pipe(dev_priv, pipe);
4164 
4165         ironlake_pfit_disable(intel_crtc);
4166 
4167         for_each_encoder_on_crtc(dev, crtc, encoder)
4168                 if (encoder->post_disable)
4169                         encoder->post_disable(encoder);
4170 
4171         if (intel_crtc->config.has_pch_encoder) {
4172                 ironlake_fdi_disable(crtc);
4173 
4174                 ironlake_disable_pch_transcoder(dev_priv, pipe);
4175                 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
4176 
4177                 if (HAS_PCH_CPT(dev)) {
4178                         /* disable TRANS_DP_CTL */
4179                         reg = TRANS_DP_CTL(pipe);
4180                         temp = I915_READ(reg);
4181                         temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4182                                   TRANS_DP_PORT_SEL_MASK);
4183                         temp |= TRANS_DP_PORT_SEL_NONE;
4184                         I915_WRITE(reg, temp);
4185 
4186                         /* disable DPLL_SEL */
4187                         temp = I915_READ(PCH_DPLL_SEL);
4188                         temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4189                         I915_WRITE(PCH_DPLL_SEL, temp);
4190                 }
4191 
4192                 /* disable PCH DPLL */
4193                 intel_disable_shared_dpll(intel_crtc);
4194 
4195                 ironlake_fdi_pll_disable(intel_crtc);
4196         }
4197 
4198         intel_crtc->active = false;
4199         intel_update_watermarks(crtc);
4200 
4201         mutex_lock(&dev->struct_mutex);
4202         intel_update_fbc(dev);
4203         intel_edp_psr_update(dev);
4204         mutex_unlock(&dev->struct_mutex);
4205 }
4206 
4207 static void haswell_crtc_disable(struct drm_crtc *crtc)
4208 {
4209         struct drm_device *dev = crtc->dev;
4210         struct drm_i915_private *dev_priv = dev->dev_private;
4211         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4212         struct intel_encoder *encoder;
4213         int pipe = intel_crtc->pipe;
4214         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4215 
4216         if (!intel_crtc->active)
4217                 return;
4218 
4219         intel_crtc_disable_planes(crtc);
4220 
4221         for_each_encoder_on_crtc(dev, crtc, encoder) {
4222                 intel_opregion_notify_encoder(encoder, false);
4223                 encoder->disable(encoder);
4224         }
4225 
4226         if (intel_crtc->config.has_pch_encoder)
4227                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
4228         intel_disable_pipe(dev_priv, pipe);
4229 
4230         intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4231 
4232         ironlake_pfit_disable(intel_crtc);
4233 
4234         intel_ddi_disable_pipe_clock(intel_crtc);
4235 
4236         for_each_encoder_on_crtc(dev, crtc, encoder)
4237                 if (encoder->post_disable)
4238                         encoder->post_disable(encoder);
4239 
4240         if (intel_crtc->config.has_pch_encoder) {
4241                 lpt_disable_pch_transcoder(dev_priv);
4242                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4243                 intel_ddi_fdi_disable(crtc);
4244         }
4245 
4246         intel_crtc->active = false;
4247         intel_update_watermarks(crtc);
4248 
4249         mutex_lock(&dev->struct_mutex);
4250         intel_update_fbc(dev);
4251         intel_edp_psr_update(dev);
4252         mutex_unlock(&dev->struct_mutex);
4253 }
4254 
4255 static void ironlake_crtc_off(struct drm_crtc *crtc)
4256 {
4257         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4258         intel_put_shared_dpll(intel_crtc);
4259 }
4260 
4261 static void haswell_crtc_off(struct drm_crtc *crtc)
4262 {
4263         intel_ddi_put_crtc_pll(crtc);
4264 }
4265 
4266 static void i9xx_pfit_enable(struct intel_crtc *crtc)
4267 {
4268         struct drm_device *dev = crtc->base.dev;
4269         struct drm_i915_private *dev_priv = dev->dev_private;
4270         struct intel_crtc_config *pipe_config = &crtc->config;
4271 
4272         if (!crtc->config.gmch_pfit.control)
4273                 return;
4274 
4275         /*
4276          * The panel fitter should only be adjusted whilst the pipe is disabled,
4277          * according to register description and PRM.
4278          */
4279         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
4280         assert_pipe_disabled(dev_priv, crtc->pipe);
4281 
4282         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
4283         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
4284 
4285         /* Border color in case we don't scale up to the full screen. Black by
4286          * default, change to something else for debugging. */
4287         I915_WRITE(BCLRPAT(crtc->pipe), 0);
4288 }
4289 
4290 #define for_each_power_domain(domain, mask)                             \
4291         for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
4292                 if ((1 << (domain)) & (mask))
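     /*
      * Iterates over each domain whose bit is set in 'mask', e.g.
      * for_each_power_domain(domain, BIT(POWER_DOMAIN_INIT)) visits only
      * POWER_DOMAIN_INIT.
      */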
4293 
4294 enum intel_display_power_domain
4295 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4296 {
4297         struct drm_device *dev = intel_encoder->base.dev;
4298         struct intel_digital_port *intel_dig_port;
4299 
4300         switch (intel_encoder->type) {
4301         case INTEL_OUTPUT_UNKNOWN:
4302                 /* Only DDI platforms should ever use this output type */
4303                 WARN_ON_ONCE(!HAS_DDI(dev));
4304         case INTEL_OUTPUT_DISPLAYPORT:
4305         case INTEL_OUTPUT_HDMI:
4306         case INTEL_OUTPUT_EDP:
4307                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4308                 switch (intel_dig_port->port) {
4309                 case PORT_A:
4310                         return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4311                 case PORT_B:
4312                         return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4313                 case PORT_C:
4314                         return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4315                 case PORT_D:
4316                         return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4317                 default:
4318                         WARN_ON_ONCE(1);
4319                         return POWER_DOMAIN_PORT_OTHER;
4320                 }
4321         case INTEL_OUTPUT_ANALOG:
4322                 return POWER_DOMAIN_PORT_CRT;
4323         case INTEL_OUTPUT_DSI:
4324                 return POWER_DOMAIN_PORT_DSI;
4325         default:
4326                 return POWER_DOMAIN_PORT_OTHER;
4327         }
4328 }
4329 
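     /*
      * Collects every power domain the given CRTC depends on: its pipe, its
      * CPU transcoder, the panel fitter (only when the PCH pfit is enabled)
      * and the port domain of each encoder attached to it.
      */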
4330 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4331 {
4332         struct drm_device *dev = crtc->dev;
4333         struct intel_encoder *intel_encoder;
4334         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4335         enum pipe pipe = intel_crtc->pipe;
4336         bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
4337         unsigned long mask;
4338         enum transcoder transcoder;
4339 
4340         transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4341 
4342         mask = BIT(POWER_DOMAIN_PIPE(pipe));
4343         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4344         if (pfit_enabled)
4345                 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4346 
4347         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4348                 mask |= BIT(intel_display_port_power_domain(intel_encoder));
4349 
4350         return mask;
4351 }
4352 
4353 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
4354                                   bool enable)
4355 {
4356         if (dev_priv->power_domains.init_power_on == enable)
4357                 return;
4358 
4359         if (enable)
4360                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
4361         else
4362                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4363 
4364         dev_priv->power_domains.init_power_on = enable;
4365 }
4366 
4367 static void modeset_update_crtc_power_domains(struct drm_device *dev)
4368 {
4369         struct drm_i915_private *dev_priv = dev->dev_private;
4370         unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4371         struct intel_crtc *crtc;
4372 
4373         /*
4374          * First get all needed power domains, then put all unneeded, to avoid
4375          * any unnecessary toggling of the power wells.
4376          */
4377         for_each_intel_crtc(dev, crtc) {
4378                 enum intel_display_power_domain domain;
4379 
4380                 if (!crtc->base.enabled)
4381                         continue;
4382 
4383                 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4384 
4385                 for_each_power_domain(domain, pipe_domains[crtc->pipe])
4386                         intel_display_power_get(dev_priv, domain);
4387         }
4388 
4389         for_each_intel_crtc(dev, crtc) {
4390                 enum intel_display_power_domain domain;
4391 
4392                 for_each_power_domain(domain, crtc->enabled_power_domains)
4393                         intel_display_power_put(dev_priv, domain);
4394 
4395                 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4396         }
4397 
4398         intel_display_set_init_power(dev_priv, false);
4399 }
4400 
4401 int valleyview_get_vco(struct drm_i915_private *dev_priv)
4402 {
4403         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4404 
4405         /* Obtain SKU information */
4406         mutex_lock(&dev_priv->dpio_lock);
4407         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
4408                 CCK_FUSE_HPLL_FREQ_MASK;
4409         mutex_unlock(&dev_priv->dpio_lock);
4410 
4411         return vco_freq[hpll_freq];
4412 }
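     /*
      * Note: the CCK fuse field selects one of the four HPLL VCO frequencies
      * in the table above (values in MHz here); e.g. a fuse value of 1 selects
      * the 1600 MHz VCO.
      */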
4413 
4414 /* Adjust CDclk dividers to allow high res or save power if possible */
4415 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4416 {
4417         struct drm_i915_private *dev_priv = dev->dev_private;
4418         u32 val, cmd;
4419 
4420         WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
4421         dev_priv->vlv_cdclk_freq = cdclk;
4422 
4423         if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
4424                 cmd = 2;
4425         else if (cdclk == 266)
4426                 cmd = 1;
4427         else
4428                 cmd = 0;
4429 
4430         mutex_lock(&dev_priv->rps.hw_lock);
4431         val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4432         val &= ~DSPFREQGUAR_MASK;
4433         val |= (cmd << DSPFREQGUAR_SHIFT);
4434         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4435         if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4436                       DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4437                      50)) {
4438                 DRM_ERROR("timed out waiting for CDclk change\n");
4439         }
4440         mutex_unlock(&dev_priv->rps.hw_lock);
4441 
4442         if (cdclk == 400) {
4443                 u32 divider, vco;
4444 
4445                 vco = valleyview_get_vco(dev_priv);
4446                 divider = ((vco << 1) / cdclk) - 1;
4447 
4448                 mutex_lock(&dev_priv->dpio_lock);
4449                 /* adjust cdclk divider */
4450                 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4451                 val &= ~0xf;
4452                 val |= divider;
4453                 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
4454                 mutex_unlock(&dev_priv->dpio_lock);
4455         }
4456 
4457         mutex_lock(&dev_priv->dpio_lock);
4458         /* adjust self-refresh exit latency value */
4459         val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4460         val &= ~0x7f;
4461 
4462         /*
4463          * For high bandwidth configs, we set a higher latency in the bunit
4464          * so that the core display fetch happens in time to avoid underruns.
4465          */
4466         if (cdclk == 400)
4467                 val |= 4500 / 250; /* 4.5 usec */
4468         else
4469                 val |= 3000 / 250; /* 3.0 usec */
4470         vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4471         mutex_unlock(&dev_priv->dpio_lock);
4472 
4473         /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
4474         intel_i2c_reset(dev);
4475 }
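     /*
      * Worked example (assuming an 800 MHz HPLL SKU): for a 400 MHz CDclk the
      * divider programmed above is ((800 * 2) / 400) - 1 = 3, and the BISOC
      * self-refresh exit latency field counts 250 ns units, so 4500 / 250 = 18
      * encodes 4.5 usec.
      */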
4476 
4477 int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
4478 {
4479         int cur_cdclk, vco;
4480         int divider;
4481 
4482         vco = valleyview_get_vco(dev_priv);
4483 
4484         mutex_lock(&dev_priv->dpio_lock);
4485         divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4486         mutex_unlock(&dev_priv->dpio_lock);
4487 
4488         divider &= 0xf;
4489 
4490         cur_cdclk = (vco << 1) / (divider + 1);
4491 
4492         return cur_cdclk;
4493 }
4494 
4495 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4496                                  int max_pixclk)
4497 {
4498         /*
4499          * Really only a few cases to deal with, as only 4 CDclks are supported:
4500          *   200MHz
4501          *   267MHz
4502          *   320MHz
4503          *   400MHz
4504          * So we check to see whether we're above 90% of the lower bin and
4505          * adjust if needed.
4506          */
4507         if (max_pixclk > 288000) {
4508                 return 400;
4509         } else if (max_pixclk > 240000) {
4510                 return 320;
4511         } else
4512                 return 266;
4513         /* Looks like the 200MHz CDclk freq doesn't work on some configs */
4514 }
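     /*
      * Example: a 250 MHz max pixel clock is above 90% of the 267 MHz bin
      * (240000 kHz) but below 90% of 320 MHz (288000 kHz), so 320 is chosen.
      */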
4515 
4516 /* compute the max pixel clock for new configuration */
4517 static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4518 {
4519         struct drm_device *dev = dev_priv->dev;
4520         struct intel_crtc *intel_crtc;
4521         int max_pixclk = 0;
4522 
4523         for_each_intel_crtc(dev, intel_crtc) {
4524                 if (intel_crtc->new_enabled)
4525                         max_pixclk = max(max_pixclk,
4526                                          intel_crtc->new_config->adjusted_mode.crtc_clock);
4527         }
4528 
4529         return max_pixclk;
4530 }
4531 
4532 static void valleyview_modeset_global_pipes(struct drm_device *dev,
4533                                             unsigned *prepare_pipes)
4534 {
4535         struct drm_i915_private *dev_priv = dev->dev_private;
4536         struct intel_crtc *intel_crtc;
4537         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4538 
4539         if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4540             dev_priv->vlv_cdclk_freq)
4541                 return;
4542 
4543         /* disable/enable all currently active pipes while we change cdclk */
4544         for_each_intel_crtc(dev, intel_crtc)
4545                 if (intel_crtc->base.enabled)
4546                         *prepare_pipes |= (1 << intel_crtc->pipe);
4547 }
4548 
4549 static void valleyview_modeset_global_resources(struct drm_device *dev)
4550 {
4551         struct drm_i915_private *dev_priv = dev->dev_private;
4552         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4553         int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4554 
4555         if (req_cdclk != dev_priv->vlv_cdclk_freq)
4556                 valleyview_set_cdclk(dev, req_cdclk);
4557         modeset_update_crtc_power_domains(dev);
4558 }
4559 
4560 static void valleyview_crtc_enable(struct drm_crtc *crtc)
4561 {
4562         struct drm_device *dev = crtc->dev;
4563         struct drm_i915_private *dev_priv = dev->dev_private;
4564         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4565         struct intel_encoder *encoder;
4566         int pipe = intel_crtc->pipe;
4567         int plane = intel_crtc->plane;
4568         bool is_dsi;
4569         u32 dspcntr;
4570 
4571         WARN_ON(!crtc->enabled);
4572 
4573         if (intel_crtc->active)
4574                 return;
4575 
4576         is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4577 
4578         if (!is_dsi && !IS_CHERRYVIEW(dev))
4579                 vlv_prepare_pll(intel_crtc);
4580 
4581         /* Set up the display plane register */
4582         dspcntr = DISPPLANE_GAMMA_ENABLE;
4583 
4584         if (intel_crtc->config.has_dp_encoder)
4585                 intel_dp_set_m_n(intel_crtc);
4586 
4587         intel_set_pipe_timings(intel_crtc);
4588 
4589         /* pipesrc and dspsize control the size that is scaled from,
4590          * which should always be the user's requested size.
4591          */
4592         I915_WRITE(DSPSIZE(plane),
4593                    ((intel_crtc->config.pipe_src_h - 1) << 16) |
4594                    (intel_crtc->config.pipe_src_w - 1));
4595         I915_WRITE(DSPPOS(plane), 0);
4596 
4597         i9xx_set_pipeconf(intel_crtc);
4598 
4599         I915_WRITE(DSPCNTR(plane), dspcntr);
4600         POSTING_READ(DSPCNTR(plane));
4601 
4602         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4603                                                crtc->x, crtc->y);
4604 
4605         intel_crtc->active = true;
4606 
4607         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4608 
4609         for_each_encoder_on_crtc(dev, crtc, encoder)
4610                 if (encoder->pre_pll_enable)
4611                         encoder->pre_pll_enable(encoder);
4612 
4613         if (!is_dsi) {
4614                 if (IS_CHERRYVIEW(dev))
4615                         chv_enable_pll(intel_crtc);
4616                 else
4617                         vlv_enable_pll(intel_crtc);
4618         }
4619 
4620         for_each_encoder_on_crtc(dev, crtc, encoder)
4621                 if (encoder->pre_enable)
4622                         encoder->pre_enable(encoder);
4623 
4624         i9xx_pfit_enable(intel_crtc);
4625 
4626         intel_crtc_load_lut(crtc);
4627 
4628         intel_update_watermarks(crtc);
4629         intel_enable_pipe(intel_crtc);
4630 
4631         for_each_encoder_on_crtc(dev, crtc, encoder)
4632                 encoder->enable(encoder);
4633 
4634         intel_crtc_enable_planes(crtc);
4635 
4636         drm_crtc_vblank_on(crtc);
4637 
4638         /* Underruns don't raise interrupts, so check manually. */
4639         i9xx_check_fifo_underruns(dev);
4640 }
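     /*
      * Note the enable ordering above: the DPLL (when one is needed, i.e. not
      * for DSI) is prepared and enabled before the pipe, the pipe before the
      * encoders' ->enable hooks, and the planes only once the pipe is running;
      * FIFO underruns are then checked by hand since they don't raise an
      * interrupt.
      */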
4641 
4642 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4643 {
4644         struct drm_device *dev = crtc->base.dev;
4645         struct drm_i915_private *dev_priv = dev->dev_private;
4646 
4647         I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
4648         I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
4649 }
4650 
4651 static void i9xx_crtc_enable(struct drm_crtc *crtc)
4652 {
4653         struct drm_device *dev = crtc->dev;
4654         struct drm_i915_private *dev_priv = dev->dev_private;
4655         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4656         struct intel_encoder *encoder;
4657         int pipe = intel_crtc->pipe;
4658         int plane = intel_crtc->plane;
4659         u32 dspcntr;
4660 
4661         WARN_ON(!crtc->enabled);
4662 
4663         if (intel_crtc->active)
4664                 return;
4665 
4666         i9xx_set_pll_dividers(intel_crtc);
4667 
4668         /* Set up the display plane register */
4669         dspcntr = DISPPLANE_GAMMA_ENABLE;
4670 
4671         if (pipe == 0)
4672                 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4673         else
4674                 dspcntr |= DISPPLANE_SEL_PIPE_B;
4675 
4676         if (intel_crtc->config.has_dp_encoder)
4677                 intel_dp_set_m_n(intel_crtc);
4678 
4679         intel_set_pipe_timings(intel_crtc);
4680 
4681         /* pipesrc and dspsize control the size that is scaled from,
4682          * which should always be the user's requested size.
4683          */
4684         I915_WRITE(DSPSIZE(plane),
4685                    ((intel_crtc->config.pipe_src_h - 1) << 16) |
4686                    (intel_crtc->config.pipe_src_w - 1));
4687         I915_WRITE(DSPPOS(plane), 0);
4688 
4689         i9xx_set_pipeconf(intel_crtc);
4690 
4691         I915_WRITE(DSPCNTR(plane), dspcntr);
4692         POSTING_READ(DSPCNTR(plane));
4693 
4694         dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
4695                                                crtc->x, crtc->y);
4696 
4697         intel_crtc->active = true;
4698 
4699         if (!IS_GEN2(dev))
4700                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4701 
4702         for_each_encoder_on_crtc(dev, crtc, encoder)
4703                 if (encoder->pre_enable)
4704                         encoder->pre_enable(encoder);
4705 
4706         i9xx_enable_pll(intel_crtc);
4707 
4708         i9xx_pfit_enable(intel_crtc);
4709 
4710         intel_crtc_load_lut(crtc);
4711 
4712         intel_update_watermarks(crtc);
4713         intel_enable_pipe(intel_crtc);
4714 
4715         for_each_encoder_on_crtc(dev, crtc, encoder)
4716                 encoder->enable(encoder);
4717 
4718         intel_crtc_enable_planes(crtc);
4719 
4720         /*
4721          * Gen2 reports pipe underruns whenever all planes are disabled.
4722          * So don't enable underrun reporting before at least some planes
4723          * are enabled.
4724          * FIXME: Need to fix the logic to work when we turn off all planes
4725          * but leave the pipe running.
4726          */
4727         if (IS_GEN2(dev))
4728                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4729 
4730         drm_crtc_vblank_on(crtc);
4731 
4732         /* Underruns don't raise interrupts, so check manually. */
4733         i9xx_check_fifo_underruns(dev);
4734 }
4735 
4736 static void i9xx_pfit_disable(struct intel_crtc *crtc)
4737 {
4738         struct drm_device *dev = crtc->base.dev;
4739         struct drm_i915_private *dev_priv = dev->dev_private;
4740 
4741         if (!crtc->config.gmch_pfit.control)
4742                 return;
4743 
4744         assert_pipe_disabled(dev_priv, crtc->pipe);
4745 
4746         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
4747                          I915_READ(PFIT_CONTROL));
4748         I915_WRITE(PFIT_CONTROL, 0);
4749 }
4750 
4751 static void i9xx_crtc_disable(struct drm_crtc *crtc)
4752 {
4753         struct drm_device *dev = crtc->dev;
4754         struct drm_i915_private *dev_priv = dev->dev_private;
4755         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4756         struct intel_encoder *encoder;
4757         int pipe = intel_crtc->pipe;
4758 
4759         if (!intel_crtc->active)
4760                 return;
4761 
4762         /*
4763          * Gen2 reports pipe underruns whenever all planes are disabled.
4764          * So disable underrun reporting before all the planes get disabled.
4765          * FIXME: Need to fix the logic to work when we turn off all planes
4766          * but leave the pipe running.
4767          */
4768         if (IS_GEN2(dev))
4769                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4770 
4771         intel_crtc_disable_planes(crtc);
4772 
4773         for_each_encoder_on_crtc(dev, crtc, encoder)
4774                 encoder->disable(encoder);
4775 
4776         /*
4777          * On gen2 planes are double buffered but the pipe isn't, so we must
4778          * wait for planes to fully turn off before disabling the pipe.
4779          */
4780         if (IS_GEN2(dev))
4781                 intel_wait_for_vblank(dev, pipe);
4782 
4783         intel_disable_pipe(dev_priv, pipe);
4784 
4785         i9xx_pfit_disable(intel_crtc);
4786 
4787         for_each_encoder_on_crtc(dev, crtc, encoder)
4788                 if (encoder->post_disable)
4789                         encoder->post_disable(encoder);
4790 
4791         if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
4792                 if (IS_CHERRYVIEW(dev))
4793                         chv_disable_pll(dev_priv, pipe);
4794                 else if (IS_VALLEYVIEW(dev))
4795                         vlv_disable_pll(dev_priv, pipe);
4796                 else
4797                         i9xx_disable_pll(dev_priv, pipe);
4798         }
4799 
4800         if (!IS_GEN2(dev))
4801                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4802 
4803         intel_crtc->active = false;
4804         intel_update_watermarks(crtc);
4805 
4806         mutex_lock(&dev->struct_mutex);
4807         intel_update_fbc(dev);
4808         intel_edp_psr_update(dev);
4809         mutex_unlock(&dev->struct_mutex);
4810 }
4811 
4812 static void i9xx_crtc_off(struct drm_crtc *crtc)
4813 {
4814 }
4815 
4816 static void intel_crtc_update_sarea(struct drm_crtc *crtc,
4817                                     bool enabled)
4818 {
4819         struct drm_device *dev = crtc->dev;
4820         struct drm_i915_master_private *master_priv;
4821         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4822         int pipe = intel_crtc->pipe;
4823 
4824         if (!dev->primary->master)
4825                 return;
4826 
4827         master_priv = dev->primary->master->driver_priv;
4828         if (!master_priv->sarea_priv)
4829                 return;
4830 
4831         switch (pipe) {
4832         case 0:
4833                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
4834                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
4835                 break;
4836         case 1:
4837                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
4838                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
4839                 break;
4840         default:
4841                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
4842                 break;
4843         }
4844 }
4845 
4846 /**
4847  * Sets the power management mode of the pipe and plane.
4848  */
4849 void intel_crtc_update_dpms(struct drm_crtc *crtc)
4850 {
4851         struct drm_device *dev = crtc->dev;
4852         struct drm_i915_private *dev_priv = dev->dev_private;
4853         struct intel_encoder *intel_encoder;
4854         bool enable = false;
4855 
4856         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4857                 enable |= intel_encoder->connectors_active;
4858 
4859         if (enable)
4860                 dev_priv->display.crtc_enable(crtc);
4861         else
4862                 dev_priv->display.crtc_disable(crtc);
4863 
4864         intel_crtc_update_sarea(crtc, enable);
4865 }
4866 
4867 static void intel_crtc_disable(struct drm_crtc *crtc)
4868 {
4869         struct drm_device *dev = crtc->dev;
4870         struct drm_connector *connector;
4871         struct drm_i915_private *dev_priv = dev->dev_private;
4872 
4873         /* crtc should still be enabled when we disable it. */
4874         WARN_ON(!crtc->enabled);
4875 
4876         dev_priv->display.crtc_disable(crtc);
4877         intel_crtc_update_sarea(crtc, false);
4878         dev_priv->display.off(crtc);
4879 
4880         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
4881         assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
4882         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
4883 
4884         if (crtc->primary->fb) {
4885                 mutex_lock(&dev->struct_mutex);
4886                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
4887                 mutex_unlock(&dev->struct_mutex);
4888                 crtc->primary->fb = NULL;
4889         }
4890 
4891         /* Update computed state. */
4892         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4893                 if (!connector->encoder || !connector->encoder->crtc)
4894                         continue;
4895 
4896                 if (connector->encoder->crtc != crtc)
4897                         continue;
4898 
4899                 connector->dpms = DRM_MODE_DPMS_OFF;
4900                 to_intel_encoder(connector->encoder)->connectors_active = false;
4901         }
4902 }
4903 
4904 void intel_encoder_destroy(struct drm_encoder *encoder)
4905 {
4906         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4907 
4908         drm_encoder_cleanup(encoder);
4909         kfree(intel_encoder);
4910 }
4911 
4912 /* Simple dpms helper for encoders with just one connector, no cloning and only
4913  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
4914  * state of the entire output pipe. */
4915 static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
4916 {
4917         if (mode == DRM_MODE_DPMS_ON) {
4918                 encoder->connectors_active = true;
4919 
4920                 intel_crtc_update_dpms(encoder->base.crtc);
4921         } else {
4922                 encoder->connectors_active = false;
4923 
4924                 intel_crtc_update_dpms(encoder->base.crtc);
4925         }
4926 }
4927 
4928 /* Cross check the actual hw state with our own modeset state tracking (and its
4929  * internal consistency). */
4930 static void intel_connector_check_state(struct intel_connector *connector)
4931 {
4932         if (connector->get_hw_state(connector)) {
4933                 struct intel_encoder *encoder = connector->encoder;
4934                 struct drm_crtc *crtc;
4935                 bool encoder_enabled;
4936                 enum pipe pipe;
4937 
4938                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4939                               connector->base.base.id,
4940                               connector->base.name);
4941 
4942                 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
4943                      "wrong connector dpms state\n");
4944                 WARN(connector->base.encoder != &encoder->base,
4945                      "active connector not linked to encoder\n");
4946                 WARN(!encoder->connectors_active,
4947                      "encoder->connectors_active not set\n");
4948 
4949                 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
4950                 WARN(!encoder_enabled, "encoder not enabled\n");
4951                 if (WARN_ON(!encoder->base.crtc))
4952                         return;
4953 
4954                 crtc = encoder->base.crtc;
4955 
4956                 WARN(!crtc->enabled, "crtc not enabled\n");
4957                 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
4958                 WARN(pipe != to_intel_crtc(crtc)->pipe,
4959                      "encoder active on the wrong pipe\n");
4960         }
4961 }
4962 
4963 /* Even simpler default implementation, if there's really no special case to
4964  * consider. */
4965 void intel_connector_dpms(struct drm_connector *connector, int mode)
4966 {
4967         /* All the simple cases only support two dpms states. */
4968         if (mode != DRM_MODE_DPMS_ON)
4969                 mode = DRM_MODE_DPMS_OFF;
4970 
4971         if (mode == connector->dpms)
4972                 return;
4973 
4974         connector->dpms = mode;
4975 
4976         /* Only need to change hw state when actually enabled */
4977         if (connector->encoder)
4978                 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
4979 
4980         intel_modeset_check_state(connector->dev);
4981 }
4982 
4983 /* Simple connector->get_hw_state implementation for encoders that support only
4984  * one connector and no cloning, and hence the encoder state determines the state
4985  * of the connector. */
4986 bool intel_connector_get_hw_state(struct intel_connector *connector)
4987 {
4988         enum pipe pipe = 0;
4989         struct intel_encoder *encoder = connector->encoder;
4990 
4991         return encoder->get_hw_state(encoder, &pipe);
4992 }
4993 
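     /*
      * The FDI lane budget is shared: at most 4 lanes total, only 2 on
      * Haswell/Broadwell, and on the three-pipe Ivybridge layout pipe B may
      * only take more than 2 lanes while pipe C is off, while pipe C is
      * limited to 2 lanes and cannot be used at all if pipe B already claims
      * more than 2.
      */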
4994 static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
4995                                      struct intel_crtc_config *pipe_config)
4996 {
4997         struct drm_i915_private *dev_priv = dev->dev_private;
4998         struct intel_crtc *pipe_B_crtc =
4999                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5000 
5001         DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5002                       pipe_name(pipe), pipe_config->fdi_lanes);
5003         if (pipe_config->fdi_lanes > 4) {
5004                 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5005                               pipe_name(pipe), pipe_config->fdi_lanes);
5006                 return false;
5007         }
5008 
5009         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
5010                 if (pipe_config->fdi_lanes > 2) {
5011                         DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5012                                       pipe_config->fdi_lanes);
5013                         return false;
5014                 } else {
5015                         return true;
5016                 }
5017         }
5018 
5019         if (INTEL_INFO(dev)->num_pipes == 2)
5020                 return true;
5021 
5022         /* Ivybridge 3 pipe is really complicated */
5023         switch (pipe) {
5024         case PIPE_A:
5025                 return true;
5026         case PIPE_B:
5027                 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5028                     pipe_config->fdi_lanes > 2) {
5029                         DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5030                                       pipe_name(pipe), pipe_config->fdi_lanes);
5031                         return false;
5032                 }
5033                 return true;
5034         case PIPE_C:
5035                 if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5036                     pipe_B_crtc->config.fdi_lanes <= 2) {
5037                         if (pipe_config->fdi_lanes > 2) {
5038                                 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5039                                               pipe_name(pipe), pipe_config->fdi_lanes);
5040                                 return false;
5041                         }
5042                 } else {
5043                         DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5044                         return false;
5045                 }
5046                 return true;
5047         default:
5048                 BUG();
5049         }
5050 }
5051 
5052 #define RETRY 1
5053 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5054                                        struct intel_crtc_config *pipe_config)
5055 {
5056         struct drm_device *dev = intel_crtc->base.dev;
5057         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5058         int lane, link_bw, fdi_dotclock;
5059         bool setup_ok, needs_recompute = false;
5060 
5061 retry:
5062         /* FDI is a binary signal running at ~2.7GHz, encoding
5063          * each output octet as 10 bits. The actual frequency
5064          * is stored as a divider into a 100MHz clock, and the
5065          * mode pixel clock is stored in units of 1KHz.
5066          * Hence the bw of each lane in terms of the mode signal
5067          * is:
5068          */
5069         link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
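             /* e.g. intel_fdi_link_freq() == 27 means a 2.7 GHz FDI link; after
              * 8b/10b that carries 270 MHz worth of pixel data per lane, i.e.
              * link_bw == 270000 (kHz). */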
5070 
5071         fdi_dotclock = adjusted_mode->crtc_clock;
5072 
5073         lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5074                                            pipe_config->pipe_bpp);
5075 
5076         pipe_config->fdi_lanes = lane;
5077 
5078         intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5079                                link_bw, &pipe_config->fdi_m_n);
5080 
5081         setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5082                                             intel_crtc->pipe, pipe_config);
5083         if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5084                 pipe_config->pipe_bpp -= 2*3;
5085                 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5086                               pipe_config->pipe_bpp);
5087                 needs_recompute = true;
5088                 pipe_config->bw_constrained = true;
5089 
5090                 goto retry;
5091         }
5092 
5093         if (needs_recompute)
5094                 return RETRY;
5095 
5096         return setup_ok ? 0 : -EINVAL;
5097 }
5098 
5099 static void hsw_compute_ips_config(struct intel_crtc *crtc,
5100                                    struct intel_crtc_config *pipe_config)
5101 {
5102         pipe_config->ips_enabled = i915.enable_ips &&
5103                                    hsw_crtc_supports_ips(crtc) &&
5104                                    pipe_config->pipe_bpp <= 24;
5105 }
5106 
5107 static int intel_crtc_compute_config(struct intel_crtc *crtc,
5108                                      struct intel_crtc_config *pipe_config)
5109 {
5110         struct drm_device *dev = crtc->base.dev;
5111         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5112 
5113         /* FIXME should check pixel clock limits on all platforms */
5114         if (INTEL_INFO(dev)->gen < 4) {
5115                 struct drm_i915_private *dev_priv = dev->dev_private;
5116                 int clock_limit =
5117                         dev_priv->display.get_display_clock_speed(dev);
5118 
5119                 /*
5120                  * Enable pixel doubling when the dot clock
5121                  * is > 90% of the (display) core speed.
5122                  *
5123                  * GDG double wide on either pipe,
5124                  * otherwise pipe A only.
5125                  */
5126                 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5127                     adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5128                         clock_limit *= 2;
5129                         pipe_config->double_wide = true;
5130                 }
5131 
5132                 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5133                         return -EINVAL;
5134         }
5135 
5136         /*
5137          * Pipe horizontal size must be even in:
5138          * - DVO ganged mode
5139          * - LVDS dual channel mode
5140          * - Double wide pipe
5141          */
5142         if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5143              intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5144                 pipe_config->pipe_src_w &= ~1;
5145 
5146         /* Cantiga+ cannot handle modes with a hsync front porch of 0.
5147          * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5148          */
5149         if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5150                 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5151                 return -EINVAL;
5152 
5153         if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5154                 pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5155         } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5156                 /* only an 8bpc pipe, with 6bpc dither through the panel fitter
5157                  * for lvds. */
5158                 pipe_config->pipe_bpp = 8*3;
5159         }
5160 
5161         if (HAS_IPS(dev))
5162                 hsw_compute_ips_config(crtc, pipe_config);
5163 
5164         /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
5165          * clock survives for now. */
5166         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5167                 pipe_config->shared_dpll = crtc->config.shared_dpll;
5168 
5169         if (pipe_config->has_pch_encoder)
5170                 return ironlake_fdi_compute_config(crtc, pipe_config);
5171 
5172         return 0;
5173 }
5174 
5175 static int valleyview_get_display_clock_speed(struct drm_device *dev)
5176 {
5177         return 400000; /* FIXME */
5178 }
5179 
5180 static int i945_get_display_clock_speed(struct drm_device *dev)
5181 {
5182         return 400000;
5183 }
5184 
5185 static int i915_get_display_clock_speed(struct drm_device *dev)
5186 {
5187         return 333000;
5188 }
5189 
5190 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5191 {
5192         return 200000;
5193 }
5194 
5195 static int pnv_get_display_clock_speed(struct drm_device *dev)
5196 {
5197         u16 gcfgc = 0;
5198 
5199         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5200 
5201         switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5202         case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5203                 return 267000;
5204         case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5205                 return 333000;
5206         case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5207                 return 444000;
5208         case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5209                 return 200000;
5210         default:
5211                 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
5212         case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5213                 return 133000;
5214         case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5215                 return 167000;
5216         }
5217 }
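     /*
      * Note: the DRM_ERROR default above deliberately falls through to the
      * 133 MHz case, so an unrecognised GCFGC value yields the slowest clock.
      */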
5218 
5219 static int i915gm_get_display_clock_speed(struct drm_device *dev)
5220 {
5221         u16 gcfgc = 0;
5222 
5223         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5224 
5225         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5226                 return 133000;
5227         else {
5228                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5229                 case GC_DISPLAY_CLOCK_333_MHZ:
5230                         return 333000;
5231                 default:
5232                 case GC_DISPLAY_CLOCK_190_200_MHZ:
5233                         return 190000;
5234                 }
5235         }
5236 }
5237 
5238 static int i865_get_display_clock_speed(struct drm_device *dev)
5239 {
5240         return 266000;
5241 }
5242 
5243 static int i855_get_display_clock_speed(struct drm_device *dev)
5244 {
5245         u16 hpllcc = 0;
5246         /* Assume that the hardware is in the high speed state.  This
5247          * should be the default.
5248          */
5249         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5250         case GC_CLOCK_133_200:
5251         case GC_CLOCK_100_200:
5252                 return 200000;
5253         case GC_CLOCK_166_250:
5254                 return 250000;
5255         case GC_CLOCK_100_133:
5256                 return 133000;
5257         }
5258 
5259         /* Shouldn't happen */
5260         return 0;
5261 }
5262 
5263 static int i830_get_display_clock_speed(struct drm_device *dev)
5264 {
5265         return 133000;
5266 }
5267 
5268 static void
5269 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
5270 {
5271         while (*num > DATA_LINK_M_N_MASK ||
5272                *den > DATA_LINK_M_N_MASK) {
5273                 *num >>= 1;
5274                 *den >>= 1;
5275         }
5276 }
5277 
5278 static void compute_m_n(unsigned int m, unsigned int n,
5279                         uint32_t *ret_m, uint32_t *ret_n)
5280 {
5281         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5282         *ret_m = div_u64((uint64_t) m * *ret_n, n);
5283         intel_reduce_m_n_ratio(ret_m, ret_n);
5284 }
5285 
5286 void
5287 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5288                        int pixel_clock, int link_clock,
5289                        struct intel_link_m_n *m_n)
5290 {
5291         m_n->tu = 64;
5292 
5293         compute_m_n(bits_per_pixel * pixel_clock,
5294                     link_clock * nlanes * 8,
5295                     &m_n->gmch_m, &m_n->gmch_n);
5296 
5297         compute_m_n(pixel_clock, link_clock,
5298                     &m_n->link_m, &m_n->link_n);
5299 }
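     /*
      * Example: 24 bpp at a 148500 kHz pixel clock over 4 lanes of a
      * 270000 kHz link gives a data M/N ratio of
      * (24 * 148500) / (270000 * 4 * 8) ~= 0.41, and a link M/N ratio of
      * 148500 / 270000 = 0.55.
      */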
5300 
5301 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5302 {
5303         if (i915.panel_use_ssc >= 0)
5304                 return i915.panel_use_ssc != 0;
5305         return dev_priv->vbt.lvds_use_ssc
5306                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5307 }
5308 
5309 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5310 {
5311         struct drm_device *dev = crtc->dev;
5312         struct drm_i915_private *dev_priv = dev->dev_private;
5313         int refclk;
5314 
5315         if (IS_VALLEYVIEW(dev)) {
5316                 refclk = 100000;
5317         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5318             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5319                 refclk = dev_priv->vbt.lvds_ssc_freq;
5320                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5321         } else if (!IS_GEN2(dev)) {
5322                 refclk = 96000;
5323         } else {
5324                 refclk = 48000;
5325         }
5326 
5327         return refclk;
5328 }
5329 
5330 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
5331 {
5332         return (1 << dpll->n) << 16 | dpll->m2;
5333 }
5334 
5335 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5336 {
5337         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
5338 }
5339 
5340 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5341                                      intel_clock_t *reduced_clock)
5342 {
5343         struct drm_device *dev = crtc->base.dev;
5344         u32 fp, fp2 = 0;
5345 
5346         if (IS_PINEVIEW(dev)) {
5347                 fp = pnv_dpll_compute_fp(&crtc->config.dpll);
5348                 if (reduced_clock)
5349                         fp2 = pnv_dpll_compute_fp(reduced_clock);
5350         } else {
5351                 fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
5352                 if (reduced_clock)
5353                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
5354         }
5355 
5356         crtc->config.dpll_hw_state.fp0 = fp;
5357 
5358         crtc->lowfreq_avail = false;
5359         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5360             reduced_clock && i915.powersave) {
5361                 crtc->config.dpll_hw_state.fp1 = fp2;
5362                 crtc->lowfreq_avail = true;
5363         } else {
5364                 crtc->config.dpll_hw_state.fp1 = fp;
5365         }
5366 }
5367 
5368 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5369                 pipe)
5370 {
5371         u32 reg_val;
5372 
5373         /*
5374          * PLLB opamp always calibrates to max value of 0x3f, force enable it
5375          * and set it to a reasonable value instead.
5376          */
5377         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5378         reg_val &= 0xffffff00;
5379         reg_val |= 0x00000030;
5380         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5381 
5382         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5383         reg_val &= 0x8cffffff;
5384         reg_val = 0x8c000000;
5385         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5386 
5387         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5388         reg_val &= 0xffffff00;
5389         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5390 
5391         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5392         reg_val &= 0x00ffffff;
5393         reg_val |= 0xb0000000;
5394         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5395 }
5396 
5397 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5398                                          struct intel_link_m_n *m_n)
5399 {
5400         struct drm_device *dev = crtc->base.dev;
5401         struct drm_i915_private *dev_priv = dev->dev_private;
5402         int pipe = crtc->pipe;
5403 
5404         I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5405         I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5406         I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5407         I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5408 }
5409 
5410 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5411                                          struct intel_link_m_n *m_n)
5412 {
5413         struct drm_device *dev = crtc->base.dev;
5414         struct drm_i915_private *dev_priv = dev->dev_private;
5415         int pipe = crtc->pipe;
5416         enum transcoder transcoder = crtc->config.cpu_transcoder;
5417 
5418         if (INTEL_INFO(dev)->gen >= 5) {
5419                 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5420                 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5421                 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5422                 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5423         } else {
5424                 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5425                 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5426                 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5427                 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5428         }
5429 }
5430 
5431 static void intel_dp_set_m_n(struct intel_crtc *crtc)
5432 {
5433         if (crtc->config.has_pch_encoder)
5434                 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5435         else
5436                 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5437 }
5438 
5439 static void vlv_update_pll(struct intel_crtc *crtc)
5440 {
5441         u32 dpll, dpll_md;
5442 
5443         /*
5444          * Enable DPIO clock input. We should never disable the reference
5445          * clock for pipe B, since VGA hotplug / manual detection depends
5446          * on it.
5447          */
5448         dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5449                 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5450         /* We should never disable this, set it here for state tracking */
5451         if (crtc->pipe == PIPE_B)
5452                 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5453         dpll |= DPLL_VCO_ENABLE;
5454         crtc->config.dpll_hw_state.dpll = dpll;
5455 
5456         dpll_md = (crtc->config.pixel_multiplier - 1)
5457                 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5458         crtc->config.dpll_hw_state.dpll_md = dpll_md;
5459 }
5460 
5461 static void vlv_prepare_pll(struct intel_crtc *crtc)
5462 {
5463         struct drm_device *dev = crtc->base.dev;
5464         struct drm_i915_private *dev_priv = dev->dev_private;
5465         int pipe = crtc->pipe;
5466         u32 mdiv;
5467         u32 bestn, bestm1, bestm2, bestp1, bestp2;
5468         u32 coreclk, reg_val;
5469 
5470         mutex_lock(&dev_priv->dpio_lock);
5471 
5472         bestn = crtc->config.dpll.n;
5473         bestm1 = crtc->config.dpll.m1;
5474         bestm2 = crtc->config.dpll.m2;
5475         bestp1 = crtc->config.dpll.p1;
5476         bestp2 = crtc->config.dpll.p2;
5477 
5478         /* See eDP HDMI DPIO driver vbios notes doc */
5479 
5480         /* PLL B needs special handling */
5481         if (pipe == PIPE_B)
5482                 vlv_pllb_recal_opamp(dev_priv, pipe);
5483 
5484         /* Set up Tx target for periodic Rcomp update */
5485         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
5486 
5487         /* Disable target IRef on PLL */
5488         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
5489         reg_val &= 0x00ffffff;
5490         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
5491 
5492         /* Disable fast lock */
5493         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
5494 
5495         /* Set idtafcrecal before PLL is enabled */
5496         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5497         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5498         mdiv |= ((bestn << DPIO_N_SHIFT));
5499         mdiv |= (1 << DPIO_K_SHIFT);
5500 
5501         /*
5502          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5503          * but we don't support that).
5504          * Note: don't use the DAC post divider as it seems unstable.
5505          */
5506         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
5507         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5508 
5509         mdiv |= DPIO_ENABLE_CALIBRATION;
5510         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5511 
5512         /* Set HBR and RBR LPF coefficients */
5513         if (crtc->config.port_clock == 162000 ||
5514             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
5515             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
5516                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5517                                  0x009f0003);
5518         else
5519                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5520                                  0x00d0000f);
5521 
5522         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
5523             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5524                 /* Use SSC source */
5525                 if (pipe == PIPE_A)
5526                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5527                                          0x0df40000);
5528                 else
5529                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5530                                          0x0df70000);
5531         } else { /* HDMI or VGA */
5532                 /* Use bend source */
5533                 if (pipe == PIPE_A)
5534                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5535                                          0x0df70000);
5536                 else
5537                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5538                                          0x0df40000);
5539         }
5540 
5541         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5542         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5543         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5544             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5545                 coreclk |= 0x01000000;
5546         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5547 
5548         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5549         mutex_unlock(&dev_priv->dpio_lock);
5550 }
5551 
5552 static void chv_update_pll(struct intel_crtc *crtc)
5553 {
5554         struct drm_device *dev = crtc->base.dev;
5555         struct drm_i915_private *dev_priv = dev->dev_private;
5556         int pipe = crtc->pipe;
5557         int dpll_reg = DPLL(crtc->pipe);
5558         enum dpio_channel port = vlv_pipe_to_channel(pipe);
5559         u32 loopfilter, intcoeff;
5560         u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5561         int refclk;
5562 
5563         crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5564                 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5565                 DPLL_VCO_ENABLE;
5566         if (pipe != PIPE_A)
5567                 crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5568 
5569         crtc->config.dpll_hw_state.dpll_md =
5570                 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5571 
5572         bestn = crtc->config.dpll.n;
5573         bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
5574         bestm1 = crtc->config.dpll.m1;
5575         bestm2 = crtc->config.dpll.m2 >> 22;
5576         bestp1 = crtc->config.dpll.p1;
5577         bestp2 = crtc->config.dpll.p2;
5578 
5579         /*
5580          * Enable Refclk and SSC
5581          */
5582         I915_WRITE(dpll_reg,
5583                    crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
5584 
5585         mutex_lock(&dev_priv->dpio_lock);
5586 
5587         /* p1 and p2 divider */
5588         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
5589                         5 << DPIO_CHV_S1_DIV_SHIFT |
5590                         bestp1 << DPIO_CHV_P1_DIV_SHIFT |
5591                         bestp2 << DPIO_CHV_P2_DIV_SHIFT |
5592                         1 << DPIO_CHV_K_DIV_SHIFT);
5593 
5594         /* Feedback post-divider - m2 */
5595         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
5596 
5597         /* Feedback refclk divider - n and m1 */
5598         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
5599                         DPIO_CHV_M1_DIV_BY_2 |
5600                         1 << DPIO_CHV_N_DIV_SHIFT);
5601 
5602         /* M2 fraction division */
5603         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
5604 
5605         /* M2 fraction division enable */
5606         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
5607                        DPIO_CHV_FRAC_DIV_EN |
5608                        (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5609 
5610         /* Loop filter */
5611         refclk = i9xx_get_refclk(&crtc->base, 0);
5612         loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5613                 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5614         if (refclk == 100000)
5615                 intcoeff = 11;
5616         else if (refclk == 38400)
5617                 intcoeff = 10;
5618         else
5619                 intcoeff = 9;
5620         loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
5621         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
5622 
5623         /* AFC Recal */
5624         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
5625                         vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
5626                         DPIO_AFC_RECAL);
5627 
5628         mutex_unlock(&dev_priv->dpio_lock);
5629 }
5630 
5631 static void i9xx_update_pll(struct intel_crtc *crtc,
5632                             intel_clock_t *reduced_clock,
5633                             int num_connectors)
5634 {
5635         struct drm_device *dev = crtc->base.dev;
5636         struct drm_i915_private *dev_priv = dev->dev_private;
5637         u32 dpll;
5638         bool is_sdvo;
5639         struct dpll *clock = &crtc->config.dpll;
5640 
5641         i9xx_update_pll_dividers(crtc, reduced_clock);
5642 
5643         is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
5644                 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
5645 
5646         dpll = DPLL_VGA_MODE_DIS;
5647 
5648         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
5649                 dpll |= DPLLB_MODE_LVDS;
5650         else
5651                 dpll |= DPLLB_MODE_DAC_SERIAL;
5652 
5653         if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5654                 dpll |= (crtc->config.pixel_multiplier - 1)
5655                         << SDVO_MULTIPLIER_SHIFT_HIRES;
5656         }
5657 
5658         if (is_sdvo)
5659                 dpll |= DPLL_SDVO_HIGH_SPEED;
5660 
5661         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
5662                 dpll |= DPLL_SDVO_HIGH_SPEED;
5663 
5664         /* compute bitmask from p1 value */
5665         if (IS_PINEVIEW(dev))
5666                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5667         else {
5668                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5669                 if (IS_G4X(dev) && reduced_clock)
5670                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5671         }
5672         switch (clock->p2) {
5673         case 5:
5674                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5675                 break;
5676         case 7:
5677                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5678                 break;
5679         case 10:
5680                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5681                 break;
5682         case 14:
5683                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5684                 break;
5685         }
5686         if (INTEL_INFO(dev)->gen >= 4)
5687                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5688 
5689         if (crtc->config.sdvo_tv_clock)
5690                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5691         else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5692                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5693                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5694         else
5695                 dpll |= PLL_REF_INPUT_DREFCLK;
5696 
5697         dpll |= DPLL_VCO_ENABLE;
5698         crtc->config.dpll_hw_state.dpll = dpll;
5699 
5700         if (INTEL_INFO(dev)->gen >= 4) {
5701                 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
5702                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5703                 crtc->config.dpll_hw_state.dpll_md = dpll_md;
5704         }
5705 }
5706 
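     /*
      * Gen2 counterpart of i9xx_update_pll(): the P1/P2 encoding differs
      * and DVO outputs need the 2x clock mode, but the result likewise
      * ends up in crtc->config.dpll_hw_state.dpll.
      */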
5707 static void i8xx_update_pll(struct intel_crtc *crtc,
5708                             intel_clock_t *reduced_clock,
5709                             int num_connectors)
5710 {
5711         struct drm_device *dev = crtc->base.dev;
5712         struct drm_i915_private *dev_priv = dev->dev_private;
5713         u32 dpll;
5714         struct dpll *clock = &crtc->config.dpll;
5715 
5716         i9xx_update_pll_dividers(crtc, reduced_clock);
5717 
5718         dpll = DPLL_VGA_MODE_DIS;
5719 
5720         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
5721                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5722         } else {
5723                 if (clock->p1 == 2)
5724                         dpll |= PLL_P1_DIVIDE_BY_TWO;
5725                 else
5726                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5727                 if (clock->p2 == 4)
5728                         dpll |= PLL_P2_DIVIDE_BY_4;
5729         }
5730 
5731         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5732                 dpll |= DPLL_DVO_2X_MODE;
5733 
5734         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5735                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5736                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5737         else
5738                 dpll |= PLL_REF_INPUT_DREFCLK;
5739 
5740         dpll |= DPLL_VCO_ENABLE;
5741         crtc->config.dpll_hw_state.dpll = dpll;
5742 }
5743 
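     /*
      * Program the transcoder timing registers (H/VTOTAL, H/VBLANK,
      * H/VSYNC, VSYNCSHIFT) from the adjusted mode, compensating for the
      * extra halflines the hardware adds for interlaced modes, and set
      * PIPESRC to the user-requested source size.
      */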
5744 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5745 {
5746         struct drm_device *dev = intel_crtc->base.dev;
5747         struct drm_i915_private *dev_priv = dev->dev_private;
5748         enum pipe pipe = intel_crtc->pipe;
5749         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5750         struct drm_display_mode *adjusted_mode =
5751                 &intel_crtc->config.adjusted_mode;
5752         uint32_t crtc_vtotal, crtc_vblank_end;
5753         int vsyncshift = 0;
5754 
5755         /* We need to be careful not to change the adjusted mode, as otherwise
5756          * the hw state checker will get angry at the mismatch. */
5757         crtc_vtotal = adjusted_mode->crtc_vtotal;
5758         crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5759 
5760         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5761                 /* the chip adds 2 halflines automatically */
5762                 crtc_vtotal -= 1;
5763                 crtc_vblank_end -= 1;
5764 
5765                 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5766                         vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5767                 else
5768                         vsyncshift = adjusted_mode->crtc_hsync_start -
5769                                 adjusted_mode->crtc_htotal / 2;
5770                 if (vsyncshift < 0)
5771                         vsyncshift += adjusted_mode->crtc_htotal;
5772         }
5773 
5774         if (INTEL_INFO(dev)->gen > 3)
5775                 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
5776 
5777         I915_WRITE(HTOTAL(cpu_transcoder),
5778                    (adjusted_mode->crtc_hdisplay - 1) |
5779                    ((adjusted_mode->crtc_htotal - 1) << 16));
5780         I915_WRITE(HBLANK(cpu_transcoder),
5781                    (adjusted_mode->crtc_hblank_start - 1) |
5782                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
5783         I915_WRITE(HSYNC(cpu_transcoder),
5784                    (adjusted_mode->crtc_hsync_start - 1) |
5785                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
5786 
5787         I915_WRITE(VTOTAL(cpu_transcoder),
5788                    (adjusted_mode->crtc_vdisplay - 1) |
5789                    ((crtc_vtotal - 1) << 16));
5790         I915_WRITE(VBLANK(cpu_transcoder),
5791                    (adjusted_mode->crtc_vblank_start - 1) |
5792                    ((crtc_vblank_end - 1) << 16));
5793         I915_WRITE(VSYNC(cpu_transcoder),
5794                    (adjusted_mode->crtc_vsync_start - 1) |
5795                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
5796 
5797         /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
5798          * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
5799          * documented on the DDI_FUNC_CTL register description, EDP Input Select
5800          * bits. */
5801         if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
5802             (pipe == PIPE_B || pipe == PIPE_C))
5803                 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
5804 
5805         /* pipesrc controls the size that is scaled from, which should
5806          * always be the user's requested size.
5807          */
5808         I915_WRITE(PIPESRC(pipe),
5809                    ((intel_crtc->config.pipe_src_w - 1) << 16) |
5810                    (intel_crtc->config.pipe_src_h - 1));
5811 }
5812 
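     /*
      * Hardware state readout counterpart of intel_set_pipe_timings():
      * read the transcoder timing registers back into pipe_config.
      */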
5813 static void intel_get_pipe_timings(struct intel_crtc *crtc,
5814                                    struct intel_crtc_config *pipe_config)
5815 {
5816         struct drm_device *dev = crtc->base.dev;
5817         struct drm_i915_private *dev_priv = dev->dev_private;
5818         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5819         uint32_t tmp;
5820 
5821         tmp = I915_READ(HTOTAL(cpu_transcoder));
5822         pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
5823         pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
5824         tmp = I915_READ(HBLANK(cpu_transcoder));
5825         pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
5826         pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
5827         tmp = I915_READ(HSYNC(cpu_transcoder));
5828         pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
5829         pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
5830 
5831         tmp = I915_READ(VTOTAL(cpu_transcoder));
5832         pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
5833         pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
5834         tmp = I915_READ(VBLANK(cpu_transcoder));
5835         pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
5836         pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
5837         tmp = I915_READ(VSYNC(cpu_transcoder));
5838         pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
5839         pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
5840 
5841         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
5842                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
5843                 pipe_config->adjusted_mode.crtc_vtotal += 1;
5844                 pipe_config->adjusted_mode.crtc_vblank_end += 1;
5845         }
5846 
5847         tmp = I915_READ(PIPESRC(crtc->pipe));
5848         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
5849         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
5850 
5851         pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
5852         pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
5853 }
5854 
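     /*
      * Convert the adjusted (hardware) timings of a pipe config back into
      * a drm_display_mode, e.g. for reporting the mode read out from the
      * hardware.
      */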
5855 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5856                                  struct intel_crtc_config *pipe_config)
5857 {
5858         mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
5859         mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
5860         mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
5861         mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
5862 
5863         mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
5864         mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
5865         mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
5866         mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
5867 
5868         mode->flags = pipe_config->adjusted_mode.flags;
5869 
5870         mode->clock = pipe_config->adjusted_mode.crtc_clock;
5871         mode->flags |= pipe_config->adjusted_mode.flags;
5872 }
5873 
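     /*
      * Compute and write PIPECONF for gmch platforms: bpc and dithering
      * (g4x+ only), double-wide mode, CxSR downclocking, interlaced vs.
      * progressive output and, on VLV, the limited color range selection.
      */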
5874 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5875 {
5876         struct drm_device *dev = intel_crtc->base.dev;
5877         struct drm_i915_private *dev_priv = dev->dev_private;
5878         uint32_t pipeconf;
5879 
5880         pipeconf = 0;
5881 
5882         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
5883             I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
5884                 pipeconf |= PIPECONF_ENABLE;
5885 
5886         if (intel_crtc->config.double_wide)
5887                 pipeconf |= PIPECONF_DOUBLE_WIDE;
5888 
5889         /* only g4x and later have fancy bpc/dither controls */
5890         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5891                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
5892                 if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
5893                         pipeconf |= PIPECONF_DITHER_EN |
5894                                     PIPECONF_DITHER_TYPE_SP;
5895 
5896                 switch (intel_crtc->config.pipe_bpp) {
5897                 case 18:
5898                         pipeconf |= PIPECONF_6BPC;
5899                         break;
5900                 case 24:
5901                         pipeconf |= PIPECONF_8BPC;
5902                         break;
5903                 case 30:
5904                         pipeconf |= PIPECONF_10BPC;
5905                         break;
5906                 default:
5907                         /* Case prevented by intel_choose_pipe_bpp_dither. */
5908                         BUG();
5909                 }
5910         }
5911 
5912         if (HAS_PIPE_CXSR(dev)) {
5913                 if (intel_crtc->lowfreq_avail) {
5914                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5915                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5916                 } else {
5917                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5918                 }
5919         }
5920 
5921         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
5922                 if (INTEL_INFO(dev)->gen < 4 ||
5923                     intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5924                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5925                 else
5926                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
5927         } else
5928                 pipeconf |= PIPECONF_PROGRESSIVE;
5929 
5930         if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
5931                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
5932 
5933         I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
5934         POSTING_READ(PIPECONF(intel_crtc->pipe));
5935 }
5936 
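     /*
      * Clock computation step of the modeset for gmch platforms: DSI
      * outputs are skipped entirely, otherwise find PLL dividers for the
      * target port clock (unless the encoder already set them) and fill
      * in the platform-specific DPLL state via the i8xx/i9xx/vlv/chv
      * update_pll helpers.
      */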
5937 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5938                               int x, int y,
5939                               struct drm_framebuffer *fb)
5940 {
5941         struct drm_device *dev = crtc->dev;
5942         struct drm_i915_private *dev_priv = dev->dev_private;
5943         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5944         int refclk, num_connectors = 0;
5945         intel_clock_t clock, reduced_clock;
5946         bool ok, has_reduced_clock = false;
5947         bool is_lvds = false, is_dsi = false;
5948         struct intel_encoder *encoder;
5949         const intel_limit_t *limit;
5950 
5951         for_each_encoder_on_crtc(dev, crtc, encoder) {
5952                 switch (encoder->type) {
5953                 case INTEL_OUTPUT_LVDS:
5954                         is_lvds = true;
5955                         break;
5956                 case INTEL_OUTPUT_DSI:
5957                         is_dsi = true;
5958                         break;
5959                 }
5960 
5961                 num_connectors++;
5962         }
5963 
5964         if (is_dsi)
5965                 return 0;
5966 
5967         if (!intel_crtc->config.clock_set) {
5968                 refclk = i9xx_get_refclk(crtc, num_connectors);
5969 
5970                 /*
5971                  * Returns a set of divisors for the desired target clock with
5972                  * the given refclk, or false.  The returned values represent
5973                  * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
5974                  * 2) / p1 / p2.
5975                  */
5976                 limit = intel_limit(crtc, refclk);
5977                 ok = dev_priv->display.find_dpll(limit, crtc,
5978                                                  intel_crtc->config.port_clock,
5979                                                  refclk, NULL, &clock);
5980                 if (!ok) {
5981                         DRM_ERROR("Couldn't find PLL settings for mode!\n");
5982                         return -EINVAL;
5983                 }
5984 
5985                 if (is_lvds && dev_priv->lvds_downclock_avail) {
5986                         /*
5987                          * Ensure we match the reduced clock's P to the target
5988                          * clock.  If the clocks don't match, we can't switch
5989                          * the display clock by using FP0/FP1. In that case
5990                          * we will disable the LVDS downclock feature.
5991                          */
5992                         has_reduced_clock =
5993                                 dev_priv->display.find_dpll(limit, crtc,
5994                                                             dev_priv->lvds_downclock,
5995                                                             refclk, &clock,
5996                                                             &reduced_clock);
5997                 }
5998                 /* Compat-code for transition, will disappear. */
5999                 intel_crtc->config.dpll.n = clock.n;
6000                 intel_crtc->config.dpll.m1 = clock.m1;
6001                 intel_crtc->config.dpll.m2 = clock.m2;
6002                 intel_crtc->config.dpll.p1 = clock.p1;
6003                 intel_crtc->config.dpll.p2 = clock.p2;
6004         }
6005 
6006         if (IS_GEN2(dev)) {
6007                 i8xx_update_pll(intel_crtc,
6008                                 has_reduced_clock ? &reduced_clock : NULL,
6009                                 num_connectors);
6010         } else if (IS_CHERRYVIEW(dev)) {
6011                 chv_update_pll(intel_crtc);
6012         } else if (IS_VALLEYVIEW(dev)) {
6013                 vlv_update_pll(intel_crtc);
6014         } else {
6015                 i9xx_update_pll(intel_crtc,
6016                                 has_reduced_clock ? &reduced_clock : NULL,
6017                                 num_connectors);
6018         }
6019 
6020         return 0;
6021 }
6022 
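     /*
      * Read out the panel fitter state, but only if it is enabled and
      * actually attached to this pipe (gen4+ has a pipe select field,
      * older parts hardwire the fitter to pipe B).
      */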
6023 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6024                                  struct intel_crtc_config *pipe_config)
6025 {
6026         struct drm_device *dev = crtc->base.dev;
6027         struct drm_i915_private *dev_priv = dev->dev_private;
6028         uint32_t tmp;
6029 
6030         if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6031                 return;
6032 
6033         tmp = I915_READ(PFIT_CONTROL);
6034         if (!(tmp & PFIT_ENABLE))
6035                 return;
6036 
6037         /* Check whether the pfit is attached to our pipe. */
6038         if (INTEL_INFO(dev)->gen < 4) {
6039                 if (crtc->pipe != PIPE_B)
6040                         return;
6041         } else {
6042                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6043                         return;
6044         }
6045 
6046         pipe_config->gmch_pfit.control = tmp;
6047         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6048         if (INTEL_INFO(dev)->gen < 5)
6049                 pipe_config->gmch_pfit.lvds_border_bits =
6050                         I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6051 }
6052 
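     /* Reconstruct the port clock from the VLV DPIO PLL divider register. */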
6053 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6054                                struct intel_crtc_config *pipe_config)
6055 {
6056         struct drm_device *dev = crtc->base.dev;
6057         struct drm_i915_private *dev_priv = dev->dev_private;
6058         int pipe = pipe_config->cpu_transcoder;
6059         intel_clock_t clock;
6060         u32 mdiv;
6061         int refclk = 100000;
6062 
6063         mutex_lock(&dev_priv->dpio_lock);
6064         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6065         mutex_unlock(&dev_priv->dpio_lock);
6066 
6067         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6068         clock.m2 = mdiv & DPIO_M2DIV_MASK;
6069         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6070         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6071         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6072 
6073         vlv_clock(refclk, &clock);
6074 
6075         /* clock.dot is the fast clock */
6076         pipe_config->port_clock = clock.dot / 5;
6077 }
6078 
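     /*
      * Read the primary plane configuration (pixel format, tiling, base
      * address, stride and size) back from the hardware, e.g. so that the
      * framebuffer set up by the BIOS can be inherited at driver load.
      */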
6079 static void i9xx_get_plane_config(struct intel_crtc *crtc,
6080                                   struct intel_plane_config *plane_config)
6081 {
6082         struct drm_device *dev = crtc->base.dev;
6083         struct drm_i915_private *dev_priv = dev->dev_private;
6084         u32 val, base, offset;
6085         int pipe = crtc->pipe, plane = crtc->plane;
6086         int fourcc, pixel_format;
6087         int aligned_height;
6088 
6089         crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6090         if (!crtc->base.primary->fb) {
6091                 DRM_DEBUG_KMS("failed to alloc fb\n");
6092                 return;
6093         }
6094 
6095         val = I915_READ(DSPCNTR(plane));
6096 
6097         if (INTEL_INFO(dev)->gen >= 4)
6098                 if (val & DISPPLANE_TILED)
6099                         plane_config->tiled = true;
6100 
6101         pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6102         fourcc = intel_format_to_fourcc(pixel_format);
6103         crtc->base.primary->fb->pixel_format = fourcc;
6104         crtc->base.primary->fb->bits_per_pixel =
6105                 drm_format_plane_cpp(fourcc, 0) * 8;
6106 
6107         if (INTEL_INFO(dev)->gen >= 4) {
6108                 if (plane_config->tiled)
6109                         offset = I915_READ(DSPTILEOFF(plane));
6110                 else
6111                         offset = I915_READ(DSPLINOFF(plane));
6112                 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6113         } else {
6114                 base = I915_READ(DSPADDR(plane));
6115         }
6116         plane_config->base = base;
6117 
6118         val = I915_READ(PIPESRC(pipe));
6119         crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6120         crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6121 
6122         val = I915_READ(DSPSTRIDE(pipe));
6123         crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
6124 
6125         aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6126                                             plane_config->tiled);
6127 
6128         plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
6129                                    aligned_height, PAGE_SIZE);
6130 
6131         DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6132                       pipe, plane, crtc->base.primary->fb->width,
6133                       crtc->base.primary->fb->height,
6134                       crtc->base.primary->fb->bits_per_pixel, base,
6135                       crtc->base.primary->fb->pitches[0],
6136                       plane_config->size);
6137 
6138 }
6139 
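     /*
      * CHV counterpart of vlv_crtc_clock_get(): the PLL dividers live in
      * the per-channel CHV_PLL/CHV_CMN DPIO registers.
      */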
6140 static void chv_crtc_clock_get(struct intel_crtc *crtc,
6141                                struct intel_crtc_config *pipe_config)
6142 {
6143         struct drm_device *dev = crtc->base.dev;
6144         struct drm_i915_private *dev_priv = dev->dev_private;
6145         int pipe = pipe_config->cpu_transcoder;
6146         enum dpio_channel port = vlv_pipe_to_channel(pipe);
6147         intel_clock_t clock;
6148         u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
6149         int refclk = 100000;
6150 
6151         mutex_lock(&dev_priv->dpio_lock);
6152         cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
6153         pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
6154         pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
6155         pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
6156         mutex_unlock(&dev_priv->dpio_lock);
6157 
6158         clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
6159         clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
6160         clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
6161         clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
6162         clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
6163 
6164         chv_clock(refclk, &clock);
6165 
6166         /* clock.dot is the fast clock */
6167         pipe_config->port_clock = clock.dot / 5;
6168 }
6169 
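     /*
      * Full pipe hardware state readout for gmch platforms: pipe bpp,
      * color range, timings, panel fitter, pixel multiplier, DPLL state
      * and the resulting port clock. Returns false if the pipe's power
      * domain is off or the pipe is disabled.
      */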
6170 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6171                                  struct intel_crtc_config *pipe_config)
6172 {
6173         struct drm_device *dev = crtc->base.dev;
6174         struct drm_i915_private *dev_priv = dev->dev_private;
6175         uint32_t tmp;
6176 
6177         if (!intel_display_power_enabled(dev_priv,
6178                                          POWER_DOMAIN_PIPE(crtc->pipe)))
6179                 return false;
6180 
6181         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
6182         pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6183 
6184         tmp = I915_READ(PIPECONF(crtc->pipe));
6185         if (!(tmp & PIPECONF_ENABLE))
6186                 return false;
6187 
6188         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6189                 switch (tmp & PIPECONF_BPC_MASK) {
6190                 case PIPECONF_6BPC:
6191                         pipe_config->pipe_bpp = 18;
6192                         break;
6193                 case PIPECONF_8BPC:
6194                         pipe_config->pipe_bpp = 24;
6195                         break;
6196                 case PIPECONF_10BPC:
6197                         pipe_config->pipe_bpp = 30;
6198                         break;
6199                 default:
6200                         break;
6201                 }
6202         }
6203 
6204         if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
6205                 pipe_config->limited_color_range = true;
6206 
6207         if (INTEL_INFO(dev)->gen < 4)
6208                 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
6209 
6210         intel_get_pipe_timings(crtc, pipe_config);
6211 
6212         i9xx_get_pfit_config(crtc, pipe_config);
6213 
6214         if (INTEL_INFO(dev)->gen >= 4) {
6215                 tmp = I915_READ(DPLL_MD(crtc->pipe));
6216                 pipe_config->pixel_multiplier =
6217                         ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
6218                          >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
6219                 pipe_config->dpll_hw_state.dpll_md = tmp;
6220         } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
6221                 tmp = I915_READ(DPLL(crtc->pipe));
6222                 pipe_config->pixel_multiplier =
6223                         ((tmp & SDVO_MULTIPLIER_MASK)
6224                          >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
6225         } else {
6226                 /* Note that on i915G/GM the pixel multiplier is in the sdvo
6227                  * port and will be fixed up in the encoder->get_config
6228                  * function. */
6229                 pipe_config->pixel_multiplier = 1;
6230         }
6231         pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
6232         if (!IS_VALLEYVIEW(dev)) {
6233                 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
6234                 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
6235         } else {
6236                 /* Mask out read-only status bits. */
6237                 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
6238                                                      DPLL_PORTC_READY_MASK |
6239                                                      DPLL_PORTB_READY_MASK);
6240         }
6241 
6242         if (IS_CHERRYVIEW(dev))
6243                 chv_crtc_clock_get(crtc, pipe_config);
6244         else if (IS_VALLEYVIEW(dev))
6245                 vlv_crtc_clock_get(crtc, pipe_config);
6246         else
6247                 i9xx_crtc_clock_get(crtc, pipe_config);
6248 
6249         return true;
6250 }
6251 
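     /*
      * Configure the PCH display reference clock (PCH_DREF_CONTROL):
      * select the CK505 or internal nonspread source and enable SSC and
      * the CPU eDP source output only when a connected panel needs them,
      * switching one source at a time with a 200us settle delay between
      * steps.
      */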
6252 static void ironlake_init_pch_refclk(struct drm_device *dev)
6253 {
6254         struct drm_i915_private *dev_priv = dev->dev_private;
6255         struct drm_mode_config *mode_config = &dev->mode_config;
6256         struct intel_encoder *encoder;
6257         u32 val, final;
6258         bool has_lvds = false;
6259         bool has_cpu_edp = false;
6260         bool has_panel = false;
6261         bool has_ck505 = false;
6262         bool can_ssc = false;
6263 
6264         /* We need to take the global config into account */
6265         list_for_each_entry(encoder, &mode_config->encoder_list,
6266                             base.head) {
6267                 switch (encoder->type) {
6268                 case INTEL_OUTPUT_LVDS:
6269                         has_panel = true;
6270                         has_lvds = true;
6271                         break;
6272                 case INTEL_OUTPUT_EDP:
6273                         has_panel = true;
6274                         if (enc_to_dig_port(&encoder->base)->port == PORT_A)
6275                                 has_cpu_edp = true;
6276                         break;
6277                 }
6278         }
6279 
6280         if (HAS_PCH_IBX(dev)) {
6281                 has_ck505 = dev_priv->vbt.display_clock_mode;
6282                 can_ssc = has_ck505;
6283         } else {
6284                 has_ck505 = false;
6285                 can_ssc = true;
6286         }
6287 
6288         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
6289                       has_panel, has_lvds, has_ck505);
6290 
6291         /* Ironlake: try to set up the display reference clock before
6292          * enabling the DPLLs. This is only under the driver's control
6293          * from the PCH B stepping onwards; earlier steppings should
6294          * ignore this setting.
6295          */
6296         val = I915_READ(PCH_DREF_CONTROL);
6297 
6298         /* As we must carefully and slowly disable/enable each source in turn,
6299          * compute the final state we want first and check if we need to
6300          * make any changes at all.
6301          */
6302         final = val;
6303         final &= ~DREF_NONSPREAD_SOURCE_MASK;
6304         if (has_ck505)
6305                 final |= DREF_NONSPREAD_CK505_ENABLE;
6306         else
6307                 final |= DREF_NONSPREAD_SOURCE_ENABLE;
6308 
6309         final &= ~DREF_SSC_SOURCE_MASK;
6310         final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6311         final &= ~DREF_SSC1_ENABLE;
6312 
6313         if (has_panel) {
6314                 final |= DREF_SSC_SOURCE_ENABLE;
6315 
6316                 if (intel_panel_use_ssc(dev_priv) && can_ssc)
6317                         final |= DREF_SSC1_ENABLE;
6318 
6319                 if (has_cpu_edp) {
6320                         if (intel_panel_use_ssc(dev_priv) && can_ssc)
6321                                 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6322                         else
6323                                 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6324                 } else
6325                         final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6326         } else {
6327                 final |= DREF_SSC_SOURCE_DISABLE;
6328                 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6329         }
6330 
6331         if (final == val)
6332                 return;
6333 
6334         /* Always enable nonspread source */
6335         val &= ~DREF_NONSPREAD_SOURCE_MASK;
6336 
6337         if (has_ck505)
6338                 val |= DREF_NONSPREAD_CK505_ENABLE;
6339         else
6340                 val |= DREF_NONSPREAD_SOURCE_ENABLE;
6341 
6342         if (has_panel) {
6343                 val &= ~DREF_SSC_SOURCE_MASK;
6344                 val |= DREF_SSC_SOURCE_ENABLE;
6345 
6346                 /* SSC must be turned on before enabling the CPU output  */
6347                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6348                         DRM_DEBUG_KMS("Using SSC on panel\n");
6349                         val |= DREF_SSC1_ENABLE;
6350                 } else
6351                         val &= ~DREF_SSC1_ENABLE;
6352 
6353                 /* Get SSC going before enabling the outputs */
6354                 I915_WRITE(PCH_DREF_CONTROL, val);
6355                 POSTING_READ(PCH_DREF_CONTROL);
6356                 udelay(200);
6357 
6358                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6359 
6360                 /* Enable CPU source on CPU attached eDP */
6361                 if (has_cpu_edp) {
6362                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
6363                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
6364                                 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
6365                         } else
6366                                 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
6367                 } else
6368                         val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6369 
6370                 I915_WRITE(PCH_DREF_CONTROL, val);
6371                 POSTING_READ(PCH_DREF_CONTROL);
6372                 udelay(200);
6373         } else {
6374                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
6375 
6376                 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
6377 
6378                 /* Turn off CPU output */
6379                 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
6380 
6381                 I915_WRITE(PCH_DREF_CONTROL, val);
6382                 POSTING_READ(PCH_DREF_CONTROL);
6383                 udelay(200);
6384 
6385                 /* Turn off the SSC source */
6386                 val &= ~DREF_SSC_SOURCE_MASK;
6387                 val |= DREF_SSC_SOURCE_DISABLE;
6388 
6389                 /* Turn off SSC1 */
6390                 val &= ~DREF_SSC1_ENABLE;
6391 
6392                 I915_WRITE(PCH_DREF_CONTROL, val);
6393                 POSTING_READ(PCH_DREF_CONTROL);
6394                 udelay(200);
6395         }
6396 
6397         BUG_ON(val != final);
6398 }
6399 
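     /*
      * Pulse the FDI mPHY reset through SOUTH_CHICKEN2, waiting for the
      * status bit to acknowledge both the assert and the deassert.
      */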
6400 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
6401 {
6402         uint32_t tmp;
6403 
6404         tmp = I915_READ(SOUTH_CHICKEN2);
6405         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
6406         I915_WRITE(SOUTH_CHICKEN2, tmp);
6407 
6408         if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
6409                                FDI_MPHY_IOSFSB_RESET_STATUS, 100))
6410                 DRM_ERROR("FDI mPHY reset assert timeout\n");
6411 
6412         tmp = I915_READ(SOUTH_CHICKEN2);
6413         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
6414         I915_WRITE(SOUTH_CHICKEN2, tmp);
6415 
6416         if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &