
Linux/drivers/gpu/drm/i915/intel_display.c

  1 /*
  2  * Copyright © 2006-2007 Intel Corporation
  3  *
  4  * Permission is hereby granted, free of charge, to any person obtaining a
  5  * copy of this software and associated documentation files (the "Software"),
  6  * to deal in the Software without restriction, including without limitation
  7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8  * and/or sell copies of the Software, and to permit persons to whom the
  9  * Software is furnished to do so, subject to the following conditions:
 10  *
 11  * The above copyright notice and this permission notice (including the next
 12  * paragraph) shall be included in all copies or substantial portions of the
 13  * Software.
 14  *
 15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 21  * DEALINGS IN THE SOFTWARE.
 22  *
 23  * Authors:
 24  *      Eric Anholt <eric@anholt.net>
 25  */
 26 
 27 #include <linux/dmi.h>
 28 #include <linux/module.h>
 29 #include <linux/input.h>
 30 #include <linux/i2c.h>
 31 #include <linux/kernel.h>
 32 #include <linux/slab.h>
 33 #include <linux/vgaarb.h>
 34 #include <drm/drm_edid.h>
 35 #include <drm/drmP.h>
 36 #include "intel_drv.h"
 37 #include <drm/i915_drm.h>
 38 #include "i915_drv.h"
 39 #include "i915_trace.h"
 40 #include <drm/drm_dp_helper.h>
 41 #include <drm/drm_crtc_helper.h>
 42 #include <drm/drm_plane_helper.h>
 43 #include <drm/drm_rect.h>
 44 #include <linux/dma_remapping.h>
 45 
 46 /* Primary plane formats supported by all gen */
 47 #define COMMON_PRIMARY_FORMATS \
 48         DRM_FORMAT_C8, \
 49         DRM_FORMAT_RGB565, \
 50         DRM_FORMAT_XRGB8888, \
 51         DRM_FORMAT_ARGB8888
 52 
 53 /* Primary plane formats for gen <= 3 */
 54 static const uint32_t intel_primary_formats_gen2[] = {
 55         COMMON_PRIMARY_FORMATS,
 56         DRM_FORMAT_XRGB1555,
 57         DRM_FORMAT_ARGB1555,
 58 };
 59 
 60 /* Primary plane formats for gen >= 4 */
 61 static const uint32_t intel_primary_formats_gen4[] = {
 62         COMMON_PRIMARY_FORMATS,
 63         DRM_FORMAT_XBGR8888,
 64         DRM_FORMAT_ABGR8888,
 65         DRM_FORMAT_XRGB2101010,
 66         DRM_FORMAT_ARGB2101010,
 67         DRM_FORMAT_XBGR2101010,
 68         DRM_FORMAT_ABGR2101010,
 69 };
 70 
 71 /* Cursor formats */
 72 static const uint32_t intel_cursor_formats[] = {
 73         DRM_FORMAT_ARGB8888,
 74 };
 75 
 76 static void intel_increase_pllclock(struct drm_device *dev,
 77                                     enum pipe pipe);
 78 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 79 
 80 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 81                                 struct intel_crtc_config *pipe_config);
 82 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 83                                    struct intel_crtc_config *pipe_config);
 84 
 85 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
 86                           int x, int y, struct drm_framebuffer *old_fb);
 87 static int intel_framebuffer_init(struct drm_device *dev,
 88                                   struct intel_framebuffer *ifb,
 89                                   struct drm_mode_fb_cmd2 *mode_cmd,
 90                                   struct drm_i915_gem_object *obj);
 91 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
 92 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
 93 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 94                                          struct intel_link_m_n *m_n,
 95                                          struct intel_link_m_n *m2_n2);
 96 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
 97 static void haswell_set_pipeconf(struct drm_crtc *crtc);
 98 static void intel_set_pipe_csc(struct drm_crtc *crtc);
 99 static void vlv_prepare_pll(struct intel_crtc *crtc);
100 static void chv_prepare_pll(struct intel_crtc *crtc);
101 
102 static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
103 {
104         if (!connector->mst_port)
105                 return connector->encoder;
106         else
107                 return &connector->mst_port->mst_encoders[pipe]->base;
108 }
109 
110 typedef struct {
111         int     min, max;
112 } intel_range_t;
113 
114 typedef struct {
115         int     dot_limit;
116         int     p2_slow, p2_fast;
117 } intel_p2_t;
118 
119 typedef struct intel_limit intel_limit_t;
120 struct intel_limit {
121         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
122         intel_p2_t          p2;
123 };
124 
125 int
126 intel_pch_rawclk(struct drm_device *dev)
127 {
128         struct drm_i915_private *dev_priv = dev->dev_private;
129 
130         WARN_ON(!HAS_PCH_SPLIT(dev));
131 
132         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
133 }
134 
 135 static inline u32 /* units of 100 MHz */
136 intel_fdi_link_freq(struct drm_device *dev)
137 {
138         if (IS_GEN5(dev)) {
139                 struct drm_i915_private *dev_priv = dev->dev_private;
140                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
141         } else
142                 return 27;
143 }
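/*
 * Illustrative note: since the return value is in units of 100 MHz, the
 * fixed value 27 above corresponds to a 2.7 GHz FDI link clock on non-gen5
 * parts, while gen5 reads the frequency back from the FDI_PLL_BIOS_0
 * register (plus 2).
 */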
144 
145 static const intel_limit_t intel_limits_i8xx_dac = {
146         .dot = { .min = 25000, .max = 350000 },
147         .vco = { .min = 908000, .max = 1512000 },
148         .n = { .min = 2, .max = 16 },
149         .m = { .min = 96, .max = 140 },
150         .m1 = { .min = 18, .max = 26 },
151         .m2 = { .min = 6, .max = 16 },
152         .p = { .min = 4, .max = 128 },
153         .p1 = { .min = 2, .max = 33 },
154         .p2 = { .dot_limit = 165000,
155                 .p2_slow = 4, .p2_fast = 2 },
156 };
157 
158 static const intel_limit_t intel_limits_i8xx_dvo = {
159         .dot = { .min = 25000, .max = 350000 },
160         .vco = { .min = 908000, .max = 1512000 },
161         .n = { .min = 2, .max = 16 },
162         .m = { .min = 96, .max = 140 },
163         .m1 = { .min = 18, .max = 26 },
164         .m2 = { .min = 6, .max = 16 },
165         .p = { .min = 4, .max = 128 },
166         .p1 = { .min = 2, .max = 33 },
167         .p2 = { .dot_limit = 165000,
168                 .p2_slow = 4, .p2_fast = 4 },
169 };
170 
171 static const intel_limit_t intel_limits_i8xx_lvds = {
172         .dot = { .min = 25000, .max = 350000 },
173         .vco = { .min = 908000, .max = 1512000 },
174         .n = { .min = 2, .max = 16 },
175         .m = { .min = 96, .max = 140 },
176         .m1 = { .min = 18, .max = 26 },
177         .m2 = { .min = 6, .max = 16 },
178         .p = { .min = 4, .max = 128 },
179         .p1 = { .min = 1, .max = 6 },
180         .p2 = { .dot_limit = 165000,
181                 .p2_slow = 14, .p2_fast = 7 },
182 };
183 
184 static const intel_limit_t intel_limits_i9xx_sdvo = {
185         .dot = { .min = 20000, .max = 400000 },
186         .vco = { .min = 1400000, .max = 2800000 },
187         .n = { .min = 1, .max = 6 },
188         .m = { .min = 70, .max = 120 },
189         .m1 = { .min = 8, .max = 18 },
190         .m2 = { .min = 3, .max = 7 },
191         .p = { .min = 5, .max = 80 },
192         .p1 = { .min = 1, .max = 8 },
193         .p2 = { .dot_limit = 200000,
194                 .p2_slow = 10, .p2_fast = 5 },
195 };
196 
197 static const intel_limit_t intel_limits_i9xx_lvds = {
198         .dot = { .min = 20000, .max = 400000 },
199         .vco = { .min = 1400000, .max = 2800000 },
200         .n = { .min = 1, .max = 6 },
201         .m = { .min = 70, .max = 120 },
202         .m1 = { .min = 8, .max = 18 },
203         .m2 = { .min = 3, .max = 7 },
204         .p = { .min = 7, .max = 98 },
205         .p1 = { .min = 1, .max = 8 },
206         .p2 = { .dot_limit = 112000,
207                 .p2_slow = 14, .p2_fast = 7 },
208 };
209 
210 
211 static const intel_limit_t intel_limits_g4x_sdvo = {
212         .dot = { .min = 25000, .max = 270000 },
213         .vco = { .min = 1750000, .max = 3500000},
214         .n = { .min = 1, .max = 4 },
215         .m = { .min = 104, .max = 138 },
216         .m1 = { .min = 17, .max = 23 },
217         .m2 = { .min = 5, .max = 11 },
218         .p = { .min = 10, .max = 30 },
219         .p1 = { .min = 1, .max = 3},
220         .p2 = { .dot_limit = 270000,
221                 .p2_slow = 10,
222                 .p2_fast = 10
223         },
224 };
225 
226 static const intel_limit_t intel_limits_g4x_hdmi = {
227         .dot = { .min = 22000, .max = 400000 },
228         .vco = { .min = 1750000, .max = 3500000},
229         .n = { .min = 1, .max = 4 },
230         .m = { .min = 104, .max = 138 },
231         .m1 = { .min = 16, .max = 23 },
232         .m2 = { .min = 5, .max = 11 },
233         .p = { .min = 5, .max = 80 },
234         .p1 = { .min = 1, .max = 8},
235         .p2 = { .dot_limit = 165000,
236                 .p2_slow = 10, .p2_fast = 5 },
237 };
238 
239 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
240         .dot = { .min = 20000, .max = 115000 },
241         .vco = { .min = 1750000, .max = 3500000 },
242         .n = { .min = 1, .max = 3 },
243         .m = { .min = 104, .max = 138 },
244         .m1 = { .min = 17, .max = 23 },
245         .m2 = { .min = 5, .max = 11 },
246         .p = { .min = 28, .max = 112 },
247         .p1 = { .min = 2, .max = 8 },
248         .p2 = { .dot_limit = 0,
249                 .p2_slow = 14, .p2_fast = 14
250         },
251 };
252 
253 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
254         .dot = { .min = 80000, .max = 224000 },
255         .vco = { .min = 1750000, .max = 3500000 },
256         .n = { .min = 1, .max = 3 },
257         .m = { .min = 104, .max = 138 },
258         .m1 = { .min = 17, .max = 23 },
259         .m2 = { .min = 5, .max = 11 },
260         .p = { .min = 14, .max = 42 },
261         .p1 = { .min = 2, .max = 6 },
262         .p2 = { .dot_limit = 0,
263                 .p2_slow = 7, .p2_fast = 7
264         },
265 };
266 
267 static const intel_limit_t intel_limits_pineview_sdvo = {
268         .dot = { .min = 20000, .max = 400000},
269         .vco = { .min = 1700000, .max = 3500000 },
 270         /* Pineview's N counter is a ring counter */
271         .n = { .min = 3, .max = 6 },
272         .m = { .min = 2, .max = 256 },
273         /* Pineview only has one combined m divider, which we treat as m2. */
274         .m1 = { .min = 0, .max = 0 },
275         .m2 = { .min = 0, .max = 254 },
276         .p = { .min = 5, .max = 80 },
277         .p1 = { .min = 1, .max = 8 },
278         .p2 = { .dot_limit = 200000,
279                 .p2_slow = 10, .p2_fast = 5 },
280 };
281 
282 static const intel_limit_t intel_limits_pineview_lvds = {
283         .dot = { .min = 20000, .max = 400000 },
284         .vco = { .min = 1700000, .max = 3500000 },
285         .n = { .min = 3, .max = 6 },
286         .m = { .min = 2, .max = 256 },
287         .m1 = { .min = 0, .max = 0 },
288         .m2 = { .min = 0, .max = 254 },
289         .p = { .min = 7, .max = 112 },
290         .p1 = { .min = 1, .max = 8 },
291         .p2 = { .dot_limit = 112000,
292                 .p2_slow = 14, .p2_fast = 14 },
293 };
294 
295 /* Ironlake / Sandybridge
296  *
297  * We calculate clock using (register_value + 2) for N/M1/M2, so here
298  * the range value for them is (actual_value - 2).
299  */
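/*
 * Worked example (illustrative): with this encoding, .n = { .min = 1,
 * .max = 5 } in the DAC table below spans actual N dividers of 3..7,
 * because i9xx_clock() later divides the VCO by (clock->n + 2), and
 * i9xx_dpll_compute_m() likewise adds 2 to m1 and m2.
 */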
300 static const intel_limit_t intel_limits_ironlake_dac = {
301         .dot = { .min = 25000, .max = 350000 },
302         .vco = { .min = 1760000, .max = 3510000 },
303         .n = { .min = 1, .max = 5 },
304         .m = { .min = 79, .max = 127 },
305         .m1 = { .min = 12, .max = 22 },
306         .m2 = { .min = 5, .max = 9 },
307         .p = { .min = 5, .max = 80 },
308         .p1 = { .min = 1, .max = 8 },
309         .p2 = { .dot_limit = 225000,
310                 .p2_slow = 10, .p2_fast = 5 },
311 };
312 
313 static const intel_limit_t intel_limits_ironlake_single_lvds = {
314         .dot = { .min = 25000, .max = 350000 },
315         .vco = { .min = 1760000, .max = 3510000 },
316         .n = { .min = 1, .max = 3 },
317         .m = { .min = 79, .max = 118 },
318         .m1 = { .min = 12, .max = 22 },
319         .m2 = { .min = 5, .max = 9 },
320         .p = { .min = 28, .max = 112 },
321         .p1 = { .min = 2, .max = 8 },
322         .p2 = { .dot_limit = 225000,
323                 .p2_slow = 14, .p2_fast = 14 },
324 };
325 
326 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
327         .dot = { .min = 25000, .max = 350000 },
328         .vco = { .min = 1760000, .max = 3510000 },
329         .n = { .min = 1, .max = 3 },
330         .m = { .min = 79, .max = 127 },
331         .m1 = { .min = 12, .max = 22 },
332         .m2 = { .min = 5, .max = 9 },
333         .p = { .min = 14, .max = 56 },
334         .p1 = { .min = 2, .max = 8 },
335         .p2 = { .dot_limit = 225000,
336                 .p2_slow = 7, .p2_fast = 7 },
337 };
338 
 339 /* LVDS 100 MHz refclk limits. */
340 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
341         .dot = { .min = 25000, .max = 350000 },
342         .vco = { .min = 1760000, .max = 3510000 },
343         .n = { .min = 1, .max = 2 },
344         .m = { .min = 79, .max = 126 },
345         .m1 = { .min = 12, .max = 22 },
346         .m2 = { .min = 5, .max = 9 },
347         .p = { .min = 28, .max = 112 },
348         .p1 = { .min = 2, .max = 8 },
349         .p2 = { .dot_limit = 225000,
350                 .p2_slow = 14, .p2_fast = 14 },
351 };
352 
353 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
354         .dot = { .min = 25000, .max = 350000 },
355         .vco = { .min = 1760000, .max = 3510000 },
356         .n = { .min = 1, .max = 3 },
357         .m = { .min = 79, .max = 126 },
358         .m1 = { .min = 12, .max = 22 },
359         .m2 = { .min = 5, .max = 9 },
360         .p = { .min = 14, .max = 42 },
361         .p1 = { .min = 2, .max = 6 },
362         .p2 = { .dot_limit = 225000,
363                 .p2_slow = 7, .p2_fast = 7 },
364 };
365 
366 static const intel_limit_t intel_limits_vlv = {
367          /*
368           * These are the data rate limits (measured in fast clocks)
369           * since those are the strictest limits we have. The fast
370           * clock and actual rate limits are more relaxed, so checking
371           * them would make no difference.
372           */
373         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
374         .vco = { .min = 4000000, .max = 6000000 },
375         .n = { .min = 1, .max = 7 },
376         .m1 = { .min = 2, .max = 3 },
377         .m2 = { .min = 11, .max = 156 },
378         .p1 = { .min = 2, .max = 3 },
379         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
380 };
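/*
 * Worked example (illustrative): the .dot range above is the pixel clock
 * (in kHz) times 5 because vlv_find_best_dpll() multiplies its target by 5
 * to work in the "fast clock" domain, so 25000 * 5 .. 270000 * 5 covers
 * pixel clocks of 25 MHz .. 270 MHz.
 */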
381 
382 static const intel_limit_t intel_limits_chv = {
383         /*
384          * These are the data rate limits (measured in fast clocks)
385          * since those are the strictest limits we have.  The fast
386          * clock and actual rate limits are more relaxed, so checking
387          * them would make no difference.
388          */
389         .dot = { .min = 25000 * 5, .max = 540000 * 5},
390         .vco = { .min = 4860000, .max = 6700000 },
391         .n = { .min = 1, .max = 1 },
392         .m1 = { .min = 2, .max = 2 },
393         .m2 = { .min = 24 << 22, .max = 175 << 22 },
394         .p1 = { .min = 2, .max = 4 },
395         .p2 = { .p2_slow = 1, .p2_fast = 14 },
396 };
397 
398 static void vlv_clock(int refclk, intel_clock_t *clock)
399 {
400         clock->m = clock->m1 * clock->m2;
401         clock->p = clock->p1 * clock->p2;
402         if (WARN_ON(clock->n == 0 || clock->p == 0))
403                 return;
404         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
405         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
406 }
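/*
 * Worked example (illustrative, assuming a 100 MHz reference clock): with
 * n = 5, m1 = 3, m2 = 90, p1 = 2, p2 = 2 the code above yields m = 270,
 * p = 4, vco = 100000 * 270 / 5 = 5400000 kHz and
 * dot = 5400000 / 4 = 1350000 kHz, i.e. a 270 MHz pixel clock once the
 * fast-clock factor of 5 is divided back out.
 */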
407 
408 /**
409  * Returns whether any output on the specified pipe is of the specified type
410  */
411 static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
412 {
413         struct drm_device *dev = crtc->dev;
414         struct intel_encoder *encoder;
415 
416         for_each_encoder_on_crtc(dev, crtc, encoder)
417                 if (encoder->type == type)
418                         return true;
419 
420         return false;
421 }
422 
423 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
424                                                 int refclk)
425 {
426         struct drm_device *dev = crtc->dev;
427         const intel_limit_t *limit;
428 
429         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
430                 if (intel_is_dual_link_lvds(dev)) {
431                         if (refclk == 100000)
432                                 limit = &intel_limits_ironlake_dual_lvds_100m;
433                         else
434                                 limit = &intel_limits_ironlake_dual_lvds;
435                 } else {
436                         if (refclk == 100000)
437                                 limit = &intel_limits_ironlake_single_lvds_100m;
438                         else
439                                 limit = &intel_limits_ironlake_single_lvds;
440                 }
441         } else
442                 limit = &intel_limits_ironlake_dac;
443 
444         return limit;
445 }
446 
447 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
448 {
449         struct drm_device *dev = crtc->dev;
450         const intel_limit_t *limit;
451 
452         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
453                 if (intel_is_dual_link_lvds(dev))
454                         limit = &intel_limits_g4x_dual_channel_lvds;
455                 else
456                         limit = &intel_limits_g4x_single_channel_lvds;
457         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
458                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
459                 limit = &intel_limits_g4x_hdmi;
460         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
461                 limit = &intel_limits_g4x_sdvo;
462         } else /* The option is for other outputs */
463                 limit = &intel_limits_i9xx_sdvo;
464 
465         return limit;
466 }
467 
468 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
469 {
470         struct drm_device *dev = crtc->dev;
471         const intel_limit_t *limit;
472 
473         if (HAS_PCH_SPLIT(dev))
474                 limit = intel_ironlake_limit(crtc, refclk);
475         else if (IS_G4X(dev)) {
476                 limit = intel_g4x_limit(crtc);
477         } else if (IS_PINEVIEW(dev)) {
478                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
479                         limit = &intel_limits_pineview_lvds;
480                 else
481                         limit = &intel_limits_pineview_sdvo;
482         } else if (IS_CHERRYVIEW(dev)) {
483                 limit = &intel_limits_chv;
484         } else if (IS_VALLEYVIEW(dev)) {
485                 limit = &intel_limits_vlv;
486         } else if (!IS_GEN2(dev)) {
487                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
488                         limit = &intel_limits_i9xx_lvds;
489                 else
490                         limit = &intel_limits_i9xx_sdvo;
491         } else {
492                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
493                         limit = &intel_limits_i8xx_lvds;
494                 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
495                         limit = &intel_limits_i8xx_dvo;
496                 else
497                         limit = &intel_limits_i8xx_dac;
498         }
499         return limit;
500 }
501 
502 /* m1 is reserved as 0 in Pineview, n is a ring counter */
503 static void pineview_clock(int refclk, intel_clock_t *clock)
504 {
505         clock->m = clock->m2 + 2;
506         clock->p = clock->p1 * clock->p2;
507         if (WARN_ON(clock->n == 0 || clock->p == 0))
508                 return;
509         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
510         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
511 }
512 
513 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
514 {
515         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
516 }
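/*
 * Worked example (illustrative): register values m1 = 10, m2 = 5 give
 * m = 5 * (10 + 2) + (5 + 2) = 67, i.e. the effective M divider after the
 * +2 register encoding.
 */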
517 
518 static void i9xx_clock(int refclk, intel_clock_t *clock)
519 {
520         clock->m = i9xx_dpll_compute_m(clock);
521         clock->p = clock->p1 * clock->p2;
522         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
523                 return;
524         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
525         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
526 }
527 
528 static void chv_clock(int refclk, intel_clock_t *clock)
529 {
530         clock->m = clock->m1 * clock->m2;
531         clock->p = clock->p1 * clock->p2;
532         if (WARN_ON(clock->n == 0 || clock->p == 0))
533                 return;
534         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
535                         clock->n << 22);
536         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
537 }
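/*
 * Illustrative note on the fixed-point math above: on CHV, m2 carries 22
 * fractional bits (the limits table stores it as 24 << 22 .. 175 << 22),
 * so dividing by (clock->n << 22) cancels that scaling and leaves the VCO
 * in the same kHz units as the reference clock.
 */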
538 
539 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
540 /**
 541  * Returns whether the given set of divisors is valid for a given refclk with
542  * the given connectors.
543  */
544 
545 static bool intel_PLL_is_valid(struct drm_device *dev,
546                                const intel_limit_t *limit,
547                                const intel_clock_t *clock)
548 {
549         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
550                 INTELPllInvalid("n out of range\n");
551         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
552                 INTELPllInvalid("p1 out of range\n");
553         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
554                 INTELPllInvalid("m2 out of range\n");
555         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
556                 INTELPllInvalid("m1 out of range\n");
557 
558         if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
559                 if (clock->m1 <= clock->m2)
560                         INTELPllInvalid("m1 <= m2\n");
561 
562         if (!IS_VALLEYVIEW(dev)) {
563                 if (clock->p < limit->p.min || limit->p.max < clock->p)
564                         INTELPllInvalid("p out of range\n");
565                 if (clock->m < limit->m.min || limit->m.max < clock->m)
566                         INTELPllInvalid("m out of range\n");
567         }
568 
569         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
570                 INTELPllInvalid("vco out of range\n");
571         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
572          * connector, etc., rather than just a single range.
573          */
574         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
575                 INTELPllInvalid("dot out of range\n");
576 
577         return true;
578 }
579 
580 static bool
581 i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
582                     int target, int refclk, intel_clock_t *match_clock,
583                     intel_clock_t *best_clock)
584 {
585         struct drm_device *dev = crtc->dev;
586         intel_clock_t clock;
587         int err = target;
588 
589         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
590                 /*
591                  * For LVDS just rely on its current settings for dual-channel.
592                  * We haven't figured out how to reliably set up different
593                  * single/dual channel state, if we even can.
594                  */
595                 if (intel_is_dual_link_lvds(dev))
596                         clock.p2 = limit->p2.p2_fast;
597                 else
598                         clock.p2 = limit->p2.p2_slow;
599         } else {
600                 if (target < limit->p2.dot_limit)
601                         clock.p2 = limit->p2.p2_slow;
602                 else
603                         clock.p2 = limit->p2.p2_fast;
604         }
605 
606         memset(best_clock, 0, sizeof(*best_clock));
607 
608         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
609              clock.m1++) {
610                 for (clock.m2 = limit->m2.min;
611                      clock.m2 <= limit->m2.max; clock.m2++) {
612                         if (clock.m2 >= clock.m1)
613                                 break;
614                         for (clock.n = limit->n.min;
615                              clock.n <= limit->n.max; clock.n++) {
616                                 for (clock.p1 = limit->p1.min;
617                                         clock.p1 <= limit->p1.max; clock.p1++) {
618                                         int this_err;
619 
620                                         i9xx_clock(refclk, &clock);
621                                         if (!intel_PLL_is_valid(dev, limit,
622                                                                 &clock))
623                                                 continue;
624                                         if (match_clock &&
625                                             clock.p != match_clock->p)
626                                                 continue;
627 
628                                         this_err = abs(clock.dot - target);
629                                         if (this_err < err) {
630                                                 *best_clock = clock;
631                                                 err = this_err;
632                                         }
633                                 }
634                         }
635                 }
636         }
637 
638         return (err != target);
639 }
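/*
 * Illustrative summary: the search above is a brute-force sweep of m1, m2,
 * n and p1 (with p2 fixed by the dot-clock/LVDS rules), keeping whichever
 * valid divisor set minimises |dot - target|; the return value is true
 * only if some candidate beat the initial error bound of 'target'.
 */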
640 
641 static bool
642 pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
643                    int target, int refclk, intel_clock_t *match_clock,
644                    intel_clock_t *best_clock)
645 {
646         struct drm_device *dev = crtc->dev;
647         intel_clock_t clock;
648         int err = target;
649 
650         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
651                 /*
652                  * For LVDS just rely on its current settings for dual-channel.
653                  * We haven't figured out how to reliably set up different
654                  * single/dual channel state, if we even can.
655                  */
656                 if (intel_is_dual_link_lvds(dev))
657                         clock.p2 = limit->p2.p2_fast;
658                 else
659                         clock.p2 = limit->p2.p2_slow;
660         } else {
661                 if (target < limit->p2.dot_limit)
662                         clock.p2 = limit->p2.p2_slow;
663                 else
664                         clock.p2 = limit->p2.p2_fast;
665         }
666 
667         memset(best_clock, 0, sizeof(*best_clock));
668 
669         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
670              clock.m1++) {
671                 for (clock.m2 = limit->m2.min;
672                      clock.m2 <= limit->m2.max; clock.m2++) {
673                         for (clock.n = limit->n.min;
674                              clock.n <= limit->n.max; clock.n++) {
675                                 for (clock.p1 = limit->p1.min;
676                                         clock.p1 <= limit->p1.max; clock.p1++) {
677                                         int this_err;
678 
679                                         pineview_clock(refclk, &clock);
680                                         if (!intel_PLL_is_valid(dev, limit,
681                                                                 &clock))
682                                                 continue;
683                                         if (match_clock &&
684                                             clock.p != match_clock->p)
685                                                 continue;
686 
687                                         this_err = abs(clock.dot - target);
688                                         if (this_err < err) {
689                                                 *best_clock = clock;
690                                                 err = this_err;
691                                         }
692                                 }
693                         }
694                 }
695         }
696 
697         return (err != target);
698 }
699 
700 static bool
701 g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
702                    int target, int refclk, intel_clock_t *match_clock,
703                    intel_clock_t *best_clock)
704 {
705         struct drm_device *dev = crtc->dev;
706         intel_clock_t clock;
707         int max_n;
708         bool found;
709         /* approximately equals target * 0.00585 */
710         int err_most = (target >> 8) + (target >> 9);
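        /* illustrative arithmetic: target/256 + target/512 = 3 * target / 512 */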
711         found = false;
712 
713         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
714                 if (intel_is_dual_link_lvds(dev))
715                         clock.p2 = limit->p2.p2_fast;
716                 else
717                         clock.p2 = limit->p2.p2_slow;
718         } else {
719                 if (target < limit->p2.dot_limit)
720                         clock.p2 = limit->p2.p2_slow;
721                 else
722                         clock.p2 = limit->p2.p2_fast;
723         }
724 
725         memset(best_clock, 0, sizeof(*best_clock));
726         max_n = limit->n.max;
 727         /* based on hardware requirement, prefer a smaller n for better precision */
728         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
 729                 /* based on hardware requirement, prefer larger m1, m2 */
730                 for (clock.m1 = limit->m1.max;
731                      clock.m1 >= limit->m1.min; clock.m1--) {
732                         for (clock.m2 = limit->m2.max;
733                              clock.m2 >= limit->m2.min; clock.m2--) {
734                                 for (clock.p1 = limit->p1.max;
735                                      clock.p1 >= limit->p1.min; clock.p1--) {
736                                         int this_err;
737 
738                                         i9xx_clock(refclk, &clock);
739                                         if (!intel_PLL_is_valid(dev, limit,
740                                                                 &clock))
741                                                 continue;
742 
743                                         this_err = abs(clock.dot - target);
744                                         if (this_err < err_most) {
745                                                 *best_clock = clock;
746                                                 err_most = this_err;
747                                                 max_n = clock.n;
748                                                 found = true;
749                                         }
750                                 }
751                         }
752                 }
753         }
754         return found;
755 }
756 
757 static bool
758 vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
759                    int target, int refclk, intel_clock_t *match_clock,
760                    intel_clock_t *best_clock)
761 {
762         struct drm_device *dev = crtc->dev;
763         intel_clock_t clock;
764         unsigned int bestppm = 1000000;
765         /* min update 19.2 MHz */
766         int max_n = min(limit->n.max, refclk / 19200);
767         bool found = false;
768 
769         target *= 5; /* fast clock */
770 
771         memset(best_clock, 0, sizeof(*best_clock));
772 
 773         /* based on hardware requirement, prefer a smaller n for better precision */
774         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
775                 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
776                         for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
777                              clock.p2 -= clock.p2 > 10 ? 2 : 1) {
778                                 clock.p = clock.p1 * clock.p2;
779                                 /* based on hardware requirement, prefer bigger m1,m2 values */
780                                 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
781                                         unsigned int ppm, diff;
782 
783                                         clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
784                                                                      refclk * clock.m1);
785 
786                                         vlv_clock(refclk, &clock);
787 
788                                         if (!intel_PLL_is_valid(dev, limit,
789                                                                 &clock))
790                                                 continue;
791 
792                                         diff = abs(clock.dot - target);
793                                         ppm = div_u64(1000000ULL * diff, target);
794 
795                                         if (ppm < 100 && clock.p > best_clock->p) {
796                                                 bestppm = 0;
797                                                 *best_clock = clock;
798                                                 found = true;
799                                         }
800 
801                                         if (bestppm >= 10 && ppm < bestppm - 10) {
802                                                 bestppm = ppm;
803                                                 *best_clock = clock;
804                                                 found = true;
805                                         }
806                                 }
807                         }
808                 }
809         }
810 
811         return found;
812 }
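/*
 * Illustrative summary of the selection rule above: ppm is the deviation of
 * the computed dot clock from the target in parts per million
 * (1000000 * |dot - target| / target); candidates within 100 ppm prefer a
 * larger post divider p, otherwise a candidate must improve the best ppm by
 * more than 10 to be accepted.
 */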
813 
814 static bool
815 chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
816                    int target, int refclk, intel_clock_t *match_clock,
817                    intel_clock_t *best_clock)
818 {
819         struct drm_device *dev = crtc->dev;
820         intel_clock_t clock;
821         uint64_t m2;
 822         bool found = false;
823 
824         memset(best_clock, 0, sizeof(*best_clock));
825 
826         /*
 827          * Based on the hardware docs, n is always set to 1 and m1 is always
 828          * set to 2.  If we ever need to support a 200 MHz refclk we will have
 829          * to revisit this because n may no longer be 1.
830          */
831         clock.n = 1, clock.m1 = 2;
832         target *= 5;    /* fast clock */
833 
834         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
835                 for (clock.p2 = limit->p2.p2_fast;
836                                 clock.p2 >= limit->p2.p2_slow;
837                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
838 
839                         clock.p = clock.p1 * clock.p2;
840 
841                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
842                                         clock.n) << 22, refclk * clock.m1);
843 
844                         if (m2 > INT_MAX/clock.m1)
845                                 continue;
846 
847                         clock.m2 = m2;
848 
849                         chv_clock(refclk, &clock);
850 
851                         if (!intel_PLL_is_valid(dev, limit, &clock))
852                                 continue;
853 
854                         /* based on hardware requirement, prefer bigger p
855                          */
856                         if (clock.p > best_clock->p) {
857                                 *best_clock = clock;
858                                 found = true;
859                         }
860                 }
861         }
862 
863         return found;
864 }
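/*
 * Illustrative summary: unlike the VLV search, m2 is solved directly from
 * the target for each (p1, p2) pair using the 22-bit fixed-point formula
 * above, and among the valid results the largest post divider p wins.
 */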
865 
866 bool intel_crtc_active(struct drm_crtc *crtc)
867 {
868         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
869 
870         /* Be paranoid as we can arrive here with only partial
871          * state retrieved from the hardware during setup.
872          *
873          * We can ditch the adjusted_mode.crtc_clock check as soon
874          * as Haswell has gained clock readout/fastboot support.
875          *
876          * We can ditch the crtc->primary->fb check as soon as we can
877          * properly reconstruct framebuffers.
878          */
879         return intel_crtc->active && crtc->primary->fb &&
880                 intel_crtc->config.adjusted_mode.crtc_clock;
881 }
882 
883 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
884                                              enum pipe pipe)
885 {
886         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
887         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
888 
889         return intel_crtc->config.cpu_transcoder;
890 }
891 
892 static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
893 {
894         struct drm_i915_private *dev_priv = dev->dev_private;
895         u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
896 
897         frame = I915_READ(frame_reg);
898 
899         if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
900                 WARN(1, "vblank wait on pipe %c timed out\n",
901                      pipe_name(pipe));
902 }
903 
904 /**
905  * intel_wait_for_vblank - wait for vblank on a given pipe
906  * @dev: drm device
907  * @pipe: pipe to wait for
908  *
909  * Wait for vblank to occur on a given pipe.  Needed for various bits of
910  * mode setting code.
911  */
912 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
913 {
914         struct drm_i915_private *dev_priv = dev->dev_private;
915         int pipestat_reg = PIPESTAT(pipe);
916 
917         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
918                 g4x_wait_for_vblank(dev, pipe);
919                 return;
920         }
921 
922         /* Clear existing vblank status. Note this will clear any other
923          * sticky status fields as well.
924          *
925          * This races with i915_driver_irq_handler() with the result
926          * that either function could miss a vblank event.  Here it is not
927          * fatal, as we will either wait upon the next vblank interrupt or
928          * timeout.  Generally speaking intel_wait_for_vblank() is only
929          * called during modeset at which time the GPU should be idle and
930          * should *not* be performing page flips and thus not waiting on
931          * vblanks...
932          * Currently, the result of us stealing a vblank from the irq
933          * handler is that a single frame will be skipped during swapbuffers.
934          */
935         I915_WRITE(pipestat_reg,
936                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
937 
938         /* Wait for vblank interrupt bit to set */
939         if (wait_for(I915_READ(pipestat_reg) &
940                      PIPE_VBLANK_INTERRUPT_STATUS,
941                      50))
942                 DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
943                               pipe_name(pipe));
944 }
945 
946 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
947 {
948         struct drm_i915_private *dev_priv = dev->dev_private;
949         u32 reg = PIPEDSL(pipe);
950         u32 line1, line2;
951         u32 line_mask;
952 
953         if (IS_GEN2(dev))
954                 line_mask = DSL_LINEMASK_GEN2;
955         else
956                 line_mask = DSL_LINEMASK_GEN3;
957 
958         line1 = I915_READ(reg) & line_mask;
959         mdelay(5);
960         line2 = I915_READ(reg) & line_mask;
961 
962         return line1 == line2;
963 }
964 
965 /*
966  * intel_wait_for_pipe_off - wait for pipe to turn off
967  * @crtc: crtc whose pipe to wait for
968  *
969  * After disabling a pipe, we can't wait for vblank in the usual way,
970  * spinning on the vblank interrupt status bit, since we won't actually
971  * see an interrupt when the pipe is disabled.
972  *
973  * On Gen4 and above:
974  *   wait for the pipe register state bit to turn off
975  *
976  * Otherwise:
977  *   wait for the display line value to settle (it usually
978  *   ends up stopping at the start of the next frame).
979  *
980  */
981 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
982 {
983         struct drm_device *dev = crtc->base.dev;
984         struct drm_i915_private *dev_priv = dev->dev_private;
985         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
986         enum pipe pipe = crtc->pipe;
987 
988         if (INTEL_INFO(dev)->gen >= 4) {
989                 int reg = PIPECONF(cpu_transcoder);
990 
991                 /* Wait for the Pipe State to go off */
992                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
993                              100))
994                         WARN(1, "pipe_off wait timed out\n");
995         } else {
996                 /* Wait for the display line to settle */
997                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
998                         WARN(1, "pipe_off wait timed out\n");
999         }
1000 }
1001 
1002 /*
1003  * ibx_digital_port_connected - is the specified port connected?
1004  * @dev_priv: i915 private structure
1005  * @port: the port to test
1006  *
1007  * Returns true if @port is connected, false otherwise.
1008  */
1009 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
1010                                 struct intel_digital_port *port)
1011 {
1012         u32 bit;
1013 
1014         if (HAS_PCH_IBX(dev_priv->dev)) {
1015                 switch (port->port) {
1016                 case PORT_B:
1017                         bit = SDE_PORTB_HOTPLUG;
1018                         break;
1019                 case PORT_C:
1020                         bit = SDE_PORTC_HOTPLUG;
1021                         break;
1022                 case PORT_D:
1023                         bit = SDE_PORTD_HOTPLUG;
1024                         break;
1025                 default:
1026                         return true;
1027                 }
1028         } else {
1029                 switch (port->port) {
1030                 case PORT_B:
1031                         bit = SDE_PORTB_HOTPLUG_CPT;
1032                         break;
1033                 case PORT_C:
1034                         bit = SDE_PORTC_HOTPLUG_CPT;
1035                         break;
1036                 case PORT_D:
1037                         bit = SDE_PORTD_HOTPLUG_CPT;
1038                         break;
1039                 default:
1040                         return true;
1041                 }
1042         }
1043 
1044         return I915_READ(SDEISR) & bit;
1045 }
1046 
1047 static const char *state_string(bool enabled)
1048 {
1049         return enabled ? "on" : "off";
1050 }
1051 
1052 /* Only for pre-ILK configs */
1053 void assert_pll(struct drm_i915_private *dev_priv,
1054                 enum pipe pipe, bool state)
1055 {
1056         int reg;
1057         u32 val;
1058         bool cur_state;
1059 
1060         reg = DPLL(pipe);
1061         val = I915_READ(reg);
1062         cur_state = !!(val & DPLL_VCO_ENABLE);
1063         WARN(cur_state != state,
1064              "PLL state assertion failure (expected %s, current %s)\n",
1065              state_string(state), state_string(cur_state));
1066 }
1067 
1068 /* XXX: the dsi pll is shared between MIPI DSI ports */
1069 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1070 {
1071         u32 val;
1072         bool cur_state;
1073 
1074         mutex_lock(&dev_priv->dpio_lock);
1075         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1076         mutex_unlock(&dev_priv->dpio_lock);
1077 
1078         cur_state = val & DSI_PLL_VCO_EN;
1079         WARN(cur_state != state,
1080              "DSI PLL state assertion failure (expected %s, current %s)\n",
1081              state_string(state), state_string(cur_state));
1082 }
1083 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1084 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1085 
1086 struct intel_shared_dpll *
1087 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1088 {
1089         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1090 
1091         if (crtc->config.shared_dpll < 0)
1092                 return NULL;
1093 
1094         return &dev_priv->shared_dplls[crtc->config.shared_dpll];
1095 }
1096 
1097 /* For ILK+ */
1098 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1099                         struct intel_shared_dpll *pll,
1100                         bool state)
1101 {
1102         bool cur_state;
1103         struct intel_dpll_hw_state hw_state;
1104 
 1105         if (WARN(!pll,
1106                   "asserting DPLL %s with no DPLL\n", state_string(state)))
1107                 return;
1108 
1109         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1110         WARN(cur_state != state,
1111              "%s assertion failure (expected %s, current %s)\n",
1112              pll->name, state_string(state), state_string(cur_state));
1113 }
1114 
1115 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1116                           enum pipe pipe, bool state)
1117 {
1118         int reg;
1119         u32 val;
1120         bool cur_state;
1121         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1122                                                                       pipe);
1123 
1124         if (HAS_DDI(dev_priv->dev)) {
1125                 /* DDI does not have a specific FDI_TX register */
1126                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1127                 val = I915_READ(reg);
1128                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1129         } else {
1130                 reg = FDI_TX_CTL(pipe);
1131                 val = I915_READ(reg);
1132                 cur_state = !!(val & FDI_TX_ENABLE);
1133         }
1134         WARN(cur_state != state,
1135              "FDI TX state assertion failure (expected %s, current %s)\n",
1136              state_string(state), state_string(cur_state));
1137 }
1138 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1139 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1140 
1141 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1142                           enum pipe pipe, bool state)
1143 {
1144         int reg;
1145         u32 val;
1146         bool cur_state;
1147 
1148         reg = FDI_RX_CTL(pipe);
1149         val = I915_READ(reg);
1150         cur_state = !!(val & FDI_RX_ENABLE);
1151         WARN(cur_state != state,
1152              "FDI RX state assertion failure (expected %s, current %s)\n",
1153              state_string(state), state_string(cur_state));
1154 }
1155 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1156 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1157 
1158 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1159                                       enum pipe pipe)
1160 {
1161         int reg;
1162         u32 val;
1163 
1164         /* ILK FDI PLL is always enabled */
1165         if (INTEL_INFO(dev_priv->dev)->gen == 5)
1166                 return;
1167 
1168         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1169         if (HAS_DDI(dev_priv->dev))
1170                 return;
1171 
1172         reg = FDI_TX_CTL(pipe);
1173         val = I915_READ(reg);
1174         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1175 }
1176 
1177 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1178                        enum pipe pipe, bool state)
1179 {
1180         int reg;
1181         u32 val;
1182         bool cur_state;
1183 
1184         reg = FDI_RX_CTL(pipe);
1185         val = I915_READ(reg);
1186         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1187         WARN(cur_state != state,
1188              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1189              state_string(state), state_string(cur_state));
1190 }
1191 
1192 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1193                                   enum pipe pipe)
1194 {
1195         struct drm_device *dev = dev_priv->dev;
1196         int pp_reg;
1197         u32 val;
1198         enum pipe panel_pipe = PIPE_A;
1199         bool locked = true;
1200 
1201         if (WARN_ON(HAS_DDI(dev)))
1202                 return;
1203 
1204         if (HAS_PCH_SPLIT(dev)) {
1205                 u32 port_sel;
1206 
1207                 pp_reg = PCH_PP_CONTROL;
1208                 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1209 
1210                 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1211                     I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1212                         panel_pipe = PIPE_B;
1213                 /* XXX: else fix for eDP */
1214         } else if (IS_VALLEYVIEW(dev)) {
1215                 /* presumably write lock depends on pipe, not port select */
1216                 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1217                 panel_pipe = pipe;
1218         } else {
1219                 pp_reg = PP_CONTROL;
1220                 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1221                         panel_pipe = PIPE_B;
1222         }
1223 
1224         val = I915_READ(pp_reg);
1225         if (!(val & PANEL_POWER_ON) ||
1226             ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1227                 locked = false;
1228 
1229         WARN(panel_pipe == pipe && locked,
1230              "panel assertion failure, pipe %c regs locked\n",
1231              pipe_name(pipe));
1232 }
1233 
1234 static void assert_cursor(struct drm_i915_private *dev_priv,
1235                           enum pipe pipe, bool state)
1236 {
1237         struct drm_device *dev = dev_priv->dev;
1238         bool cur_state;
1239 
1240         if (IS_845G(dev) || IS_I865G(dev))
1241                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
1242         else
1243                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1244 
1245         WARN(cur_state != state,
1246              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1247              pipe_name(pipe), state_string(state), state_string(cur_state));
1248 }
1249 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1250 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1251 
1252 void assert_pipe(struct drm_i915_private *dev_priv,
1253                  enum pipe pipe, bool state)
1254 {
1255         int reg;
1256         u32 val;
1257         bool cur_state;
1258         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1259                                                                       pipe);
1260 
 1261         /* if we need the pipe quirk it must always be on */
1262         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1263             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1264                 state = true;
1265 
1266         if (!intel_display_power_enabled(dev_priv,
1267                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1268                 cur_state = false;
1269         } else {
1270                 reg = PIPECONF(cpu_transcoder);
1271                 val = I915_READ(reg);
1272                 cur_state = !!(val & PIPECONF_ENABLE);
1273         }
1274 
1275         WARN(cur_state != state,
1276              "pipe %c assertion failure (expected %s, current %s)\n",
1277              pipe_name(pipe), state_string(state), state_string(cur_state));
1278 }
1279 
1280 static void assert_plane(struct drm_i915_private *dev_priv,
1281                          enum plane plane, bool state)
1282 {
1283         int reg;
1284         u32 val;
1285         bool cur_state;
1286 
1287         reg = DSPCNTR(plane);
1288         val = I915_READ(reg);
1289         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1290         WARN(cur_state != state,
1291              "plane %c assertion failure (expected %s, current %s)\n",
1292              plane_name(plane), state_string(state), state_string(cur_state));
1293 }
1294 
1295 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1296 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1297 
1298 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1299                                    enum pipe pipe)
1300 {
1301         struct drm_device *dev = dev_priv->dev;
1302         int reg, i;
1303         u32 val;
1304         int cur_pipe;
1305 
1306         /* Primary planes are fixed to pipes on gen4+ */
1307         if (INTEL_INFO(dev)->gen >= 4) {
1308                 reg = DSPCNTR(pipe);
1309                 val = I915_READ(reg);
1310                 WARN(val & DISPLAY_PLANE_ENABLE,
1311                      "plane %c assertion failure, should be disabled but not\n",
1312                      plane_name(pipe));
1313                 return;
1314         }
1315 
1316         /* Need to check both planes against the pipe */
1317         for_each_pipe(dev_priv, i) {
1318                 reg = DSPCNTR(i);
1319                 val = I915_READ(reg);
1320                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1321                         DISPPLANE_SEL_PIPE_SHIFT;
1322                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1323                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1324                      plane_name(i), pipe_name(pipe));
1325         }
1326 }
1327 
1328 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1329                                     enum pipe pipe)
1330 {
1331         struct drm_device *dev = dev_priv->dev;
1332         int reg, sprite;
1333         u32 val;
1334 
1335         if (IS_VALLEYVIEW(dev)) {
1336                 for_each_sprite(pipe, sprite) {
1337                         reg = SPCNTR(pipe, sprite);
1338                         val = I915_READ(reg);
1339                         WARN(val & SP_ENABLE,
1340                              "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1341                              sprite_name(pipe, sprite), pipe_name(pipe));
1342                 }
1343         } else if (INTEL_INFO(dev)->gen >= 7) {
1344                 reg = SPRCTL(pipe);
1345                 val = I915_READ(reg);
1346                 WARN(val & SPRITE_ENABLE,
1347                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1348                      plane_name(pipe), pipe_name(pipe));
1349         } else if (INTEL_INFO(dev)->gen >= 5) {
1350                 reg = DVSCNTR(pipe);
1351                 val = I915_READ(reg);
1352                 WARN(val & DVS_ENABLE,
1353                      "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1354                      plane_name(pipe), pipe_name(pipe));
1355         }
1356 }
1357 
1358 static void assert_vblank_disabled(struct drm_crtc *crtc)
1359 {
1360         if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1361                 drm_crtc_vblank_put(crtc);
1362 }
1363 
1364 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1365 {
1366         u32 val;
1367         bool enabled;
1368 
1369         WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1370 
1371         val = I915_READ(PCH_DREF_CONTROL);
1372         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1373                             DREF_SUPERSPREAD_SOURCE_MASK));
1374         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1375 }
1376 
1377 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1378                                            enum pipe pipe)
1379 {
1380         int reg;
1381         u32 val;
1382         bool enabled;
1383 
1384         reg = PCH_TRANSCONF(pipe);
1385         val = I915_READ(reg);
1386         enabled = !!(val & TRANS_ENABLE);
1387         WARN(enabled,
1388              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1389              pipe_name(pipe));
1390 }
1391 
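     /*
      * The *_pipe_enabled() helpers below check whether a PCH port
      * register value has the port enabled and routed to the given pipe.
      * The pipe/transcoder select bits live in different fields depending
      * on the platform (on CPT the DP routing lives in TRANS_DP_CTL while
      * the other ports carry a transcoder-select field, CHV has its own
      * select masks, and older parts encode the pipe directly in the port
      * register), hence the per-platform branches in each helper.
      */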
1392 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1393                             enum pipe pipe, u32 port_sel, u32 val)
1394 {
1395         if ((val & DP_PORT_EN) == 0)
1396                 return false;
1397 
1398         if (HAS_PCH_CPT(dev_priv->dev)) {
1399                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1400                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1401                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1402                         return false;
1403         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1404                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1405                         return false;
1406         } else {
1407                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1408                         return false;
1409         }
1410         return true;
1411 }
1412 
1413 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1414                               enum pipe pipe, u32 val)
1415 {
1416         if ((val & SDVO_ENABLE) == 0)
1417                 return false;
1418 
1419         if (HAS_PCH_CPT(dev_priv->dev)) {
1420                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1421                         return false;
1422         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1423                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1424                         return false;
1425         } else {
1426                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1427                         return false;
1428         }
1429         return true;
1430 }
1431 
1432 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1433                               enum pipe pipe, u32 val)
1434 {
1435         if ((val & LVDS_PORT_EN) == 0)
1436                 return false;
1437 
1438         if (HAS_PCH_CPT(dev_priv->dev)) {
1439                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1440                         return false;
1441         } else {
1442                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1443                         return false;
1444         }
1445         return true;
1446 }
1447 
1448 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1449                               enum pipe pipe, u32 val)
1450 {
1451         if ((val & ADPA_DAC_ENABLE) == 0)
1452                 return false;
1453         if (HAS_PCH_CPT(dev_priv->dev)) {
1454                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1455                         return false;
1456         } else {
1457                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1458                         return false;
1459         }
1460         return true;
1461 }
1462 
1463 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1464                                    enum pipe pipe, int reg, u32 port_sel)
1465 {
1466         u32 val = I915_READ(reg);
1467         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1468              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1469              reg, pipe_name(pipe));
1470 
1471         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1472              && (val & DP_PIPEB_SELECT),
1473              "IBX PCH dp port still using transcoder B\n");
1474 }
1475 
1476 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1477                                      enum pipe pipe, int reg)
1478 {
1479         u32 val = I915_READ(reg);
1480         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1481              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1482              reg, pipe_name(pipe));
1483 
1484         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
1485              && (val & SDVO_PIPE_B_SELECT),
1486              "IBX PCH hdmi port still using transcoder B\n");
1487 }
1488 
1489 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1490                                       enum pipe pipe)
1491 {
1492         int reg;
1493         u32 val;
1494 
1495         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1496         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1497         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1498 
1499         reg = PCH_ADPA;
1500         val = I915_READ(reg);
1501         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1502              "PCH VGA enabled on transcoder %c, should be disabled\n",
1503              pipe_name(pipe));
1504 
1505         reg = PCH_LVDS;
1506         val = I915_READ(reg);
1507         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1508              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1509              pipe_name(pipe));
1510 
1511         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1512         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1513         assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1514 }
1515 
1516 static void intel_init_dpio(struct drm_device *dev)
1517 {
1518         struct drm_i915_private *dev_priv = dev->dev_private;
1519 
1520         if (!IS_VALLEYVIEW(dev))
1521                 return;
1522 
1523         /*
1524          * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1525          * CHV x1 PHY (DP/HDMI D)
1526          * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1527          */
1528         if (IS_CHERRYVIEW(dev)) {
1529                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1530                 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1531         } else {
1532                 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1533         }
1534 }
1535 
1536 static void vlv_enable_pll(struct intel_crtc *crtc)
1537 {
1538         struct drm_device *dev = crtc->base.dev;
1539         struct drm_i915_private *dev_priv = dev->dev_private;
1540         int reg = DPLL(crtc->pipe);
1541         u32 dpll = crtc->config.dpll_hw_state.dpll;
1542 
1543         assert_pipe_disabled(dev_priv, crtc->pipe);
1544 
1545         /* No really, not for ILK+ */
1546         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1547 
1548         /* PLL is protected by panel, make sure we can write it */
1549         if (IS_MOBILE(dev_priv->dev))
1550                 assert_panel_unlocked(dev_priv, crtc->pipe);
1551 
1552         I915_WRITE(reg, dpll);
1553         POSTING_READ(reg);
1554         udelay(150);
1555 
1556         if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1557                 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1558 
1559         I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
1560         POSTING_READ(DPLL_MD(crtc->pipe));
1561 
1562         /* We do this three times for luck */
1563         I915_WRITE(reg, dpll);
1564         POSTING_READ(reg);
1565         udelay(150); /* wait for warmup */
1566         I915_WRITE(reg, dpll);
1567         POSTING_READ(reg);
1568         udelay(150); /* wait for warmup */
1569         I915_WRITE(reg, dpll);
1570         POSTING_READ(reg);
1571         udelay(150); /* wait for warmup */
1572 }
1573 
1574 static void chv_enable_pll(struct intel_crtc *crtc)
1575 {
1576         struct drm_device *dev = crtc->base.dev;
1577         struct drm_i915_private *dev_priv = dev->dev_private;
1578         int pipe = crtc->pipe;
1579         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1580         u32 tmp;
1581 
1582         assert_pipe_disabled(dev_priv, crtc->pipe);
1583 
1584         BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1585 
1586         mutex_lock(&dev_priv->dpio_lock);
1587 
1588         /* Re-enable the 10bit clock to the display controller */
1589         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1590         tmp |= DPIO_DCLKP_EN;
1591         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1592 
1593         /*
1594          * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1595          */
1596         udelay(1);
1597 
1598         /* Enable PLL */
1599         I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
1600 
1601         /* Check PLL is locked */
1602         if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1603                 DRM_ERROR("PLL %d failed to lock\n", pipe);
1604 
1605         /* not sure when this should be written */
1606         I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
1607         POSTING_READ(DPLL_MD(pipe));
1608 
1609         mutex_unlock(&dev_priv->dpio_lock);
1610 }
1611 
1612 static int intel_num_dvo_pipes(struct drm_device *dev)
1613 {
1614         struct intel_crtc *crtc;
1615         int count = 0;
1616 
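             /*
              * Each "active && has DVO" term below evaluates to 0 or 1,
              * so this simply counts the active crtcs driving a DVO output.
              */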
1617         for_each_intel_crtc(dev, crtc)
1618                 count += crtc->active &&
1619                         intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
1620 
1621         return count;
1622 }
1623 
1624 static void i9xx_enable_pll(struct intel_crtc *crtc)
1625 {
1626         struct drm_device *dev = crtc->base.dev;
1627         struct drm_i915_private *dev_priv = dev->dev_private;
1628         int reg = DPLL(crtc->pipe);
1629         u32 dpll = crtc->config.dpll_hw_state.dpll;
1630 
1631         assert_pipe_disabled(dev_priv, crtc->pipe);
1632 
1633         /* No really, not for ILK+ */
1634         BUG_ON(INTEL_INFO(dev)->gen >= 5);
1635 
1636         /* PLL is protected by panel, make sure we can write it */
1637         if (IS_MOBILE(dev) && !IS_I830(dev))
1638                 assert_panel_unlocked(dev_priv, crtc->pipe);
1639 
1640         /* Enable DVO 2x clock on both PLLs if necessary */
1641         if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1642                 /*
1643                  * It appears to be important that we don't enable this
1644                  * for the current pipe before otherwise configuring the
1645                  * PLL. No idea how this should be handled if multiple
1646                  * DVO outputs are enabled simultaneously.
1647                  */
1648                 dpll |= DPLL_DVO_2X_MODE;
1649                 I915_WRITE(DPLL(!crtc->pipe),
1650                            I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1651         }
1652 
1653         /* Wait for the clocks to stabilize. */
1654         POSTING_READ(reg);
1655         udelay(150);
1656 
1657         if (INTEL_INFO(dev)->gen >= 4) {
1658                 I915_WRITE(DPLL_MD(crtc->pipe),
1659                            crtc->config.dpll_hw_state.dpll_md);
1660         } else {
1661                 /* The pixel multiplier can only be updated once the
1662                  * DPLL is enabled and the clocks are stable.
1663                  *
1664                  * So write it again.
1665                  */
1666                 I915_WRITE(reg, dpll);
1667         }
1668 
1669         /* We do this three times for luck */
1670         I915_WRITE(reg, dpll);
1671         POSTING_READ(reg);
1672         udelay(150); /* wait for warmup */
1673         I915_WRITE(reg, dpll);
1674         POSTING_READ(reg);
1675         udelay(150); /* wait for warmup */
1676         I915_WRITE(reg, dpll);
1677         POSTING_READ(reg);
1678         udelay(150); /* wait for warmup */
1679 }
1680 
1681 /**
1682  * i9xx_disable_pll - disable a PLL
1683  * @crtc: crtc whose PLL should be disabled
1684  *
1685  * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
1687  *
1688  * Note!  This is for pre-ILK only.
1689  */
1690 static void i9xx_disable_pll(struct intel_crtc *crtc)
1691 {
1692         struct drm_device *dev = crtc->base.dev;
1693         struct drm_i915_private *dev_priv = dev->dev_private;
1694         enum pipe pipe = crtc->pipe;
1695 
1696         /* Disable DVO 2x clock on both PLLs if necessary */
1697         if (IS_I830(dev) &&
1698             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
1699             intel_num_dvo_pipes(dev) == 1) {
1700                 I915_WRITE(DPLL(PIPE_B),
1701                            I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1702                 I915_WRITE(DPLL(PIPE_A),
1703                            I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1704         }
1705 
1706         /* Don't disable the pipe or its PLL if a pipe-force quirk still needs them */
1707         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1708             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1709                 return;
1710 
1711         /* Make sure the pipe isn't still relying on us */
1712         assert_pipe_disabled(dev_priv, pipe);
1713 
1714         I915_WRITE(DPLL(pipe), 0);
1715         POSTING_READ(DPLL(pipe));
1716 }
1717 
1718 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1719 {
1720         u32 val = 0;
1721 
1722         /* Make sure the pipe isn't still relying on us */
1723         assert_pipe_disabled(dev_priv, pipe);
1724 
1725         /*
1726          * Leave integrated clock source and reference clock enabled for pipe B.
1727          * The latter is needed for VGA hotplug / manual detection.
1728          */
1729         if (pipe == PIPE_B)
1730                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
1731         I915_WRITE(DPLL(pipe), val);
1732         POSTING_READ(DPLL(pipe));
1733 
1734 }
1735 
1736 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1737 {
1738         enum dpio_channel port = vlv_pipe_to_channel(pipe);
1739         u32 val;
1740 
1741         /* Make sure the pipe isn't still relying on us */
1742         assert_pipe_disabled(dev_priv, pipe);
1743 
1744         /* Set PLL en = 0 */
1745         val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
1746         if (pipe != PIPE_A)
1747                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1748         I915_WRITE(DPLL(pipe), val);
1749         POSTING_READ(DPLL(pipe));
1750 
1751         mutex_lock(&dev_priv->dpio_lock);
1752 
1753         /* Disable 10bit clock to display controller */
1754         val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1755         val &= ~DPIO_DCLKP_EN;
1756         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1757 
1758         /* disable left/right clock distribution */
1759         if (pipe != PIPE_B) {
1760                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1761                 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1762                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1763         } else {
1764                 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1765                 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1766                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1767         }
1768 
1769         mutex_unlock(&dev_priv->dpio_lock);
1770 }
1771 
1772 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1773                 struct intel_digital_port *dport)
1774 {
1775         u32 port_mask;
1776         int dpll_reg;
1777 
1778         switch (dport->port) {
1779         case PORT_B:
1780                 port_mask = DPLL_PORTB_READY_MASK;
1781                 dpll_reg = DPLL(0);
1782                 break;
1783         case PORT_C:
1784                 port_mask = DPLL_PORTC_READY_MASK;
1785                 dpll_reg = DPLL(0);
1786                 break;
1787         case PORT_D:
1788                 port_mask = DPLL_PORTD_READY_MASK;
1789                 dpll_reg = DPIO_PHY_STATUS;
1790                 break;
1791         default:
1792                 BUG();
1793         }
1794 
1795         if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
1796                 WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
1797                      port_name(dport->port), I915_READ(dpll_reg));
1798 }
1799 
1800 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1801 {
1802         struct drm_device *dev = crtc->base.dev;
1803         struct drm_i915_private *dev_priv = dev->dev_private;
1804         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1805 
1806         if (WARN_ON(pll == NULL))
1807                 return;
1808 
1809         WARN_ON(!pll->refcount);
1810         if (pll->active == 0) {
1811                 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1812                 WARN_ON(pll->on);
1813                 assert_shared_dpll_disabled(dev_priv, pll);
1814 
1815                 pll->mode_set(dev_priv, pll);
1816         }
1817 }
1818 
1819 /**
1820  * intel_enable_shared_dpll - enable PCH PLL
1821  * @crtc: crtc whose shared DPLL should be enabled
1823  *
1824  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1825  * drives the transcoder clock.
1826  */
1827 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1828 {
1829         struct drm_device *dev = crtc->base.dev;
1830         struct drm_i915_private *dev_priv = dev->dev_private;
1831         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1832 
1833         if (WARN_ON(pll == NULL))
1834                 return;
1835 
1836         if (WARN_ON(pll->refcount == 0))
1837                 return;
1838 
1839         DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1840                       pll->name, pll->active, pll->on,
1841                       crtc->base.base.id);
1842 
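             /*
              * pll->active counts the crtcs currently using this shared
              * DPLL; only the first user (the 0 -> 1 transition below)
              * actually powers it up, later callers just verify that it
              * is already running.
              */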
1843         if (pll->active++) {
1844                 WARN_ON(!pll->on);
1845                 assert_shared_dpll_enabled(dev_priv, pll);
1846                 return;
1847         }
1848         WARN_ON(pll->on);
1849 
1850         intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1851 
1852         DRM_DEBUG_KMS("enabling %s\n", pll->name);
1853         pll->enable(dev_priv, pll);
1854         pll->on = true;
1855 }
1856 
1857 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1858 {
1859         struct drm_device *dev = crtc->base.dev;
1860         struct drm_i915_private *dev_priv = dev->dev_private;
1861         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1862 
1863         /* PCH only available on ILK+ */
1864         BUG_ON(INTEL_INFO(dev)->gen < 5);
1865         if (WARN_ON(pll == NULL))
1866                return;
1867 
1868         if (WARN_ON(pll->refcount == 0))
1869                 return;
1870 
1871         DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1872                       pll->name, pll->active, pll->on,
1873                       crtc->base.base.id);
1874 
1875         if (WARN_ON(pll->active == 0)) {
1876                 assert_shared_dpll_disabled(dev_priv, pll);
1877                 return;
1878         }
1879 
1880         assert_shared_dpll_enabled(dev_priv, pll);
1881         WARN_ON(!pll->on);
1882         if (--pll->active)
1883                 return;
1884 
1885         DRM_DEBUG_KMS("disabling %s\n", pll->name);
1886         pll->disable(dev_priv, pll);
1887         pll->on = false;
1888 
1889         intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1890 }
1891 
1892 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1893                                            enum pipe pipe)
1894 {
1895         struct drm_device *dev = dev_priv->dev;
1896         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1897         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1898         uint32_t reg, val, pipeconf_val;
1899 
1900         /* PCH only available on ILK+ */
1901         BUG_ON(!HAS_PCH_SPLIT(dev));
1902 
1903         /* Make sure PCH DPLL is enabled */
1904         assert_shared_dpll_enabled(dev_priv,
1905                                    intel_crtc_to_shared_dpll(intel_crtc));
1906 
1907         /* FDI must be feeding us bits for PCH ports */
1908         assert_fdi_tx_enabled(dev_priv, pipe);
1909         assert_fdi_rx_enabled(dev_priv, pipe);
1910 
1911         if (HAS_PCH_CPT(dev)) {
1912                 /* Workaround: Set the timing override bit before enabling the
1913                  * pch transcoder. */
1914                 reg = TRANS_CHICKEN2(pipe);
1915                 val = I915_READ(reg);
1916                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1917                 I915_WRITE(reg, val);
1918         }
1919 
1920         reg = PCH_TRANSCONF(pipe);
1921         val = I915_READ(reg);
1922         pipeconf_val = I915_READ(PIPECONF(pipe));
1923 
1924         if (HAS_PCH_IBX(dev_priv->dev)) {
1925                 /*
1926                  * Make the BPC in the transcoder consistent with
1927                  * the BPC in the pipeconf register.
1928                  */
1929                 val &= ~PIPECONF_BPC_MASK;
1930                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1931         }
1932 
1933         val &= ~TRANS_INTERLACE_MASK;
1934         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1935                 if (HAS_PCH_IBX(dev_priv->dev) &&
1936                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1937                         val |= TRANS_LEGACY_INTERLACED_ILK;
1938                 else
1939                         val |= TRANS_INTERLACED;
1940         else
1941                 val |= TRANS_PROGRESSIVE;
1942 
1943         I915_WRITE(reg, val | TRANS_ENABLE);
1944         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1945                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1946 }
1947 
1948 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1949                                       enum transcoder cpu_transcoder)
1950 {
1951         u32 val, pipeconf_val;
1952 
1953         /* PCH only available on ILK+ */
1954         BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
1955 
1956         /* FDI must be feeding us bits for PCH ports */
1957         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1958         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1959 
1960         /* Workaround: set timing override bit. */
1961         val = I915_READ(_TRANSA_CHICKEN2);
1962         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1963         I915_WRITE(_TRANSA_CHICKEN2, val);
1964 
1965         val = TRANS_ENABLE;
1966         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1967 
1968         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1969             PIPECONF_INTERLACED_ILK)
1970                 val |= TRANS_INTERLACED;
1971         else
1972                 val |= TRANS_PROGRESSIVE;
1973 
1974         I915_WRITE(LPT_TRANSCONF, val);
1975         if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1976                 DRM_ERROR("Failed to enable PCH transcoder\n");
1977 }
1978 
1979 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1980                                             enum pipe pipe)
1981 {
1982         struct drm_device *dev = dev_priv->dev;
1983         uint32_t reg, val;
1984 
1985         /* FDI relies on the transcoder */
1986         assert_fdi_tx_disabled(dev_priv, pipe);
1987         assert_fdi_rx_disabled(dev_priv, pipe);
1988 
1989         /* Ports must be off as well */
1990         assert_pch_ports_disabled(dev_priv, pipe);
1991 
1992         reg = PCH_TRANSCONF(pipe);
1993         val = I915_READ(reg);
1994         val &= ~TRANS_ENABLE;
1995         I915_WRITE(reg, val);
1996         /* wait for PCH transcoder off, transcoder state */
1997         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1998                 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1999 
2000         if (!HAS_PCH_IBX(dev)) {
2001                 /* Workaround: Clear the timing override chicken bit again. */
2002                 reg = TRANS_CHICKEN2(pipe);
2003                 val = I915_READ(reg);
2004                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2005                 I915_WRITE(reg, val);
2006         }
2007 }
2008 
2009 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
2010 {
2011         u32 val;
2012 
2013         val = I915_READ(LPT_TRANSCONF);
2014         val &= ~TRANS_ENABLE;
2015         I915_WRITE(LPT_TRANSCONF, val);
2016         /* wait for PCH transcoder off, transcoder state */
2017         if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
2018                 DRM_ERROR("Failed to disable PCH transcoder\n");
2019 
2020         /* Workaround: clear timing override bit. */
2021         val = I915_READ(_TRANSA_CHICKEN2);
2022         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2023         I915_WRITE(_TRANSA_CHICKEN2, val);
2024 }
2025 
2026 /**
2027  * intel_enable_pipe - enable a pipe, asserting requirements
2028  * @crtc: crtc responsible for the pipe
2029  *
2030  * Enable @crtc's pipe, making sure that various hardware specific requirements
2031  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2032  */
2033 static void intel_enable_pipe(struct intel_crtc *crtc)
2034 {
2035         struct drm_device *dev = crtc->base.dev;
2036         struct drm_i915_private *dev_priv = dev->dev_private;
2037         enum pipe pipe = crtc->pipe;
2038         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2039                                                                       pipe);
2040         enum pipe pch_transcoder;
2041         int reg;
2042         u32 val;
2043 
2044         assert_planes_disabled(dev_priv, pipe);
2045         assert_cursor_disabled(dev_priv, pipe);
2046         assert_sprites_disabled(dev_priv, pipe);
2047 
2048         if (HAS_PCH_LPT(dev_priv->dev))
2049                 pch_transcoder = TRANSCODER_A;
2050         else
2051                 pch_transcoder = pipe;
2052 
2053         /*
2054          * A pipe without a PLL won't actually be able to drive bits from
2055          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2056          * need the check.
2057          */
2058         if (!HAS_PCH_SPLIT(dev_priv->dev))
2059                 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
2060                         assert_dsi_pll_enabled(dev_priv);
2061                 else
2062                         assert_pll_enabled(dev_priv, pipe);
2063         else {
2064                 if (crtc->config.has_pch_encoder) {
2065                         /* if driving the PCH, we need FDI enabled */
2066                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2067                         assert_fdi_tx_pll_enabled(dev_priv,
2068                                                   (enum pipe) cpu_transcoder);
2069                 }
2070                 /* FIXME: assert CPU port conditions for SNB+ */
2071         }
2072 
2073         reg = PIPECONF(cpu_transcoder);
2074         val = I915_READ(reg);
2075         if (val & PIPECONF_ENABLE) {
2076                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2077                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2078                 return;
2079         }
2080 
2081         I915_WRITE(reg, val | PIPECONF_ENABLE);
2082         POSTING_READ(reg);
2083 }
2084 
2085 /**
2086  * intel_disable_pipe - disable a pipe, asserting requirements
2087  * @crtc: crtc whose pipe is to be disabled
2088  *
2089  * Disable the pipe of @crtc, making sure that various hardware
2090  * specific requirements are met, if applicable, e.g. plane
2091  * disabled, panel fitter off, etc.
2092  *
2093  * Will wait until the pipe has shut down before returning.
2094  */
2095 static void intel_disable_pipe(struct intel_crtc *crtc)
2096 {
2097         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2098         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
2099         enum pipe pipe = crtc->pipe;
2100         int reg;
2101         u32 val;
2102 
2103         /*
2104          * Make sure planes won't keep trying to pump pixels to us,
2105          * or we might hang the display.
2106          */
2107         assert_planes_disabled(dev_priv, pipe);
2108         assert_cursor_disabled(dev_priv, pipe);
2109         assert_sprites_disabled(dev_priv, pipe);
2110 
2111         reg = PIPECONF(cpu_transcoder);
2112         val = I915_READ(reg);
2113         if ((val & PIPECONF_ENABLE) == 0)
2114                 return;
2115 
2116         /*
2117          * Double wide has implications for planes,
2118          * so it's best to keep it disabled when not needed.
2119          */
2120         if (crtc->config.double_wide)
2121                 val &= ~PIPECONF_DOUBLE_WIDE;
2122 
2123         /* Don't disable the pipe or its PLL if a pipe-force quirk still needs them */
2124         if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2125             !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2126                 val &= ~PIPECONF_ENABLE;
2127 
2128         I915_WRITE(reg, val);
2129         if ((val & PIPECONF_ENABLE) == 0)
2130                 intel_wait_for_pipe_off(crtc);
2131 }
2132 
2133 /*
2134  * Plane regs are double buffered, going from enabled->disabled needs a
2135  * trigger in order to latch.  The display address reg provides this.
2136  */
2137 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2138                                enum plane plane)
2139 {
2140         struct drm_device *dev = dev_priv->dev;
2141         u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
2142 
2143         I915_WRITE(reg, I915_READ(reg));
2144         POSTING_READ(reg);
2145 }
2146 
2147 /**
2148  * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2149  * @plane:  plane to be enabled
2150  * @crtc: crtc for the plane
2151  *
2152  * Enable @plane on @crtc, making sure that the pipe is running first.
2153  */
2154 static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2155                                           struct drm_crtc *crtc)
2156 {
2157         struct drm_device *dev = plane->dev;
2158         struct drm_i915_private *dev_priv = dev->dev_private;
2159         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2160 
2161         /* If the pipe isn't enabled, we can't pump pixels and may hang */
2162         assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2163 
2164         if (intel_crtc->primary_enabled)
2165                 return;
2166 
2167         intel_crtc->primary_enabled = true;
2168 
2169         dev_priv->display.update_primary_plane(crtc, plane->fb,
2170                                                crtc->x, crtc->y);
2171 
2172         /*
2173          * BDW signals flip done immediately if the plane
2174          * is disabled, even if the plane enable is already
2175          * armed to occur at the next vblank :(
2176          */
2177         if (IS_BROADWELL(dev))
2178                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2179 }
2180 
2181 /**
2182  * intel_disable_primary_hw_plane - disable the primary hardware plane
2183  * @plane: plane to be disabled
2184  * @crtc: crtc for the plane
2185  *
2186  * Disable @plane on @crtc, making sure that the pipe is running first.
2187  */
2188 static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2189                                            struct drm_crtc *crtc)
2190 {
2191         struct drm_device *dev = plane->dev;
2192         struct drm_i915_private *dev_priv = dev->dev_private;
2193         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2194 
2195         assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2196 
2197         if (!intel_crtc->primary_enabled)
2198                 return;
2199 
2200         intel_crtc->primary_enabled = false;
2201 
2202         dev_priv->display.update_primary_plane(crtc, plane->fb,
2203                                                crtc->x, crtc->y);
2204 }
2205 
2206 static bool need_vtd_wa(struct drm_device *dev)
2207 {
2208 #ifdef CONFIG_INTEL_IOMMU
2209         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2210                 return true;
2211 #endif
2212         return false;
2213 }
2214 
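     /*
      * Scanout heights are padded to a full tile row when the object is
      * tiled: 16 scanlines per tile row on gen2, 8 on later generations,
      * and no padding (tile_height == 1) for linear buffers.  As a rough
      * example with made-up numbers, a 1081-line X-tiled framebuffer on
      * gen4 would be aligned up to 1088 lines.
      */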
2215 static int intel_align_height(struct drm_device *dev, int height, bool tiled)
2216 {
2217         int tile_height;
2218 
2219         tile_height = tiled ? (IS_GEN2(dev) ? 16 : 8) : 1;
2220         return ALIGN(height, tile_height);
2221 }
2222 
2223 int
2224 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2225                            struct drm_i915_gem_object *obj,
2226                            struct intel_engine_cs *pipelined)
2227 {
2228         struct drm_i915_private *dev_priv = dev->dev_private;
2229         u32 alignment;
2230         int ret;
2231 
2232         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2233 
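             /*
              * Pick the GTT alignment the display engine needs for scanout.
              * Summarizing the switch below: linear surfaces want 128K on
              * Broadwater/Crestline, 4K on other gen4+ parts and 64K on
              * earlier hardware; X-tiled surfaces let pin() derive the
              * alignment from the fence constraints; Y-tiled scanout is
              * rejected here.
              */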
2234         switch (obj->tiling_mode) {
2235         case I915_TILING_NONE:
2236                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2237                         alignment = 128 * 1024;
2238                 else if (INTEL_INFO(dev)->gen >= 4)
2239                         alignment = 4 * 1024;
2240                 else
2241                         alignment = 64 * 1024;
2242                 break;
2243         case I915_TILING_X:
2244                 /* pin() will align the object as required by fence */
2245                 alignment = 0;
2246                 break;
2247         case I915_TILING_Y:
2248                 WARN(1, "Y tiled bo slipped through, driver bug!\n");
2249                 return -EINVAL;
2250         default:
2251                 BUG();
2252         }
2253 
2254         /* Note that the w/a also requires 64 PTE of padding following the
2255          * bo. We currently fill all unused PTE with the shadow page and so
2256          * we should always have valid PTE following the scanout preventing
2257          * the VT-d warning.
2258          */
2259         if (need_vtd_wa(dev) && alignment < 256 * 1024)
2260                 alignment = 256 * 1024;
2261 
2262         /*
2263          * Global gtt pte registers are special registers which actually forward
2264          * writes to a chunk of system memory. Which means that there is no risk
2265          * that the register values disappear as soon as we call
2266          * intel_runtime_pm_put(), so it is correct to wrap only the
2267          * pin/unpin/fence and not more.
2268          */
2269         intel_runtime_pm_get(dev_priv);
2270 
2271         dev_priv->mm.interruptible = false;
2272         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2273         if (ret)
2274                 goto err_interruptible;
2275 
2276         /* Install a fence for tiled scan-out. Pre-i965 always needs a
2277          * fence, whereas 965+ only requires a fence if using
2278          * framebuffer compression.  For simplicity, we always install
2279          * a fence as the cost is not that onerous.
2280          */
2281         ret = i915_gem_object_get_fence(obj);
2282         if (ret)
2283                 goto err_unpin;
2284 
2285         i915_gem_object_pin_fence(obj);
2286 
2287         dev_priv->mm.interruptible = true;
2288         intel_runtime_pm_put(dev_priv);
2289         return 0;
2290 
2291 err_unpin:
2292         i915_gem_object_unpin_from_display_plane(obj);
2293 err_interruptible:
2294         dev_priv->mm.interruptible = true;
2295         intel_runtime_pm_put(dev_priv);
2296         return ret;
2297 }
2298 
2299 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2300 {
2301         WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2302 
2303         i915_gem_object_unpin_fence(obj);
2304         i915_gem_object_unpin_from_display_plane(obj);
2305 }
2306 
2307 /* Computes the linear offset to the base tile and adjusts x, y.  The
2308  * bytes-per-pixel value (cpp) is assumed to be a power of two. */
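     /*
      * Rough worked example with hypothetical numbers: for a 32bpp X-tiled
      * surface (cpp = 4, so 512 / cpp = 128 pixels per tile row) with a
      * 4096 byte pitch, a request for x = 200, y = 37 gives
      * tile_rows = 37 / 8 = 4 and tiles = 200 / 128 = 1, i.e. a base
      * offset of 4 * 4096 * 8 + 1 * 4096 = 135168 bytes, with the
      * residual coordinates adjusted to x = 72, y = 5.
      */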
2309 unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2310                                              unsigned int tiling_mode,
2311                                              unsigned int cpp,
2312                                              unsigned int pitch)
2313 {
2314         if (tiling_mode != I915_TILING_NONE) {
2315                 unsigned int tile_rows, tiles;
2316 
2317                 tile_rows = *y / 8;
2318                 *y %= 8;
2319 
2320                 tiles = *x / (512/cpp);
2321                 *x %= 512/cpp;
2322 
2323                 return tile_rows * pitch * 8 + tiles * 4096;
2324         } else {
2325                 unsigned int offset;
2326 
2327                 offset = *y * pitch + *x * cpp;
2328                 *y = 0;
2329                 *x = (offset & 4095) / cpp;
2330                 return offset & -4096;
2331         }
2332 }
2333 
2334 int intel_format_to_fourcc(int format)
2335 {
2336         switch (format) {
2337         case DISPPLANE_8BPP:
2338                 return DRM_FORMAT_C8;
2339         case DISPPLANE_BGRX555:
2340                 return DRM_FORMAT_XRGB1555;
2341         case DISPPLANE_BGRX565:
2342                 return DRM_FORMAT_RGB565;
2343         default:
2344         case DISPPLANE_BGRX888:
2345                 return DRM_FORMAT_XRGB8888;
2346         case DISPPLANE_RGBX888:
2347                 return DRM_FORMAT_XBGR8888;
2348         case DISPPLANE_BGRX101010:
2349                 return DRM_FORMAT_XRGB2101010;
2350         case DISPPLANE_RGBX101010:
2351                 return DRM_FORMAT_XBGR2101010;
2352         }
2353 }
2354 
2355 static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
2356                                   struct intel_plane_config *plane_config)
2357 {
2358         struct drm_device *dev = crtc->base.dev;
2359         struct drm_i915_gem_object *obj = NULL;
2360         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2361         u32 base = plane_config->base;
2362 
2363         if (plane_config->size == 0)
2364                 return false;
2365 
2366         obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
2367                                                              plane_config->size);
2368         if (!obj)
2369                 return false;
2370 
2371         if (plane_config->tiled) {
2372                 obj->tiling_mode = I915_TILING_X;
2373                 obj->stride = crtc->base.primary->fb->pitches[0];
2374         }
2375 
2376         mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
2377         mode_cmd.width = crtc->base.primary->fb->width;
2378         mode_cmd.height = crtc->base.primary->fb->height;
2379         mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];
2380 
2381         mutex_lock(&dev->struct_mutex);
2382 
2383         if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
2384                                    &mode_cmd, obj)) {
2385                 DRM_DEBUG_KMS("intel fb init failed\n");
2386                 goto out_unref_obj;
2387         }
2388 
2389         obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
2390         mutex_unlock(&dev->struct_mutex);
2391 
2392         DRM_DEBUG_KMS("plane fb obj %p\n", obj);
2393         return true;
2394 
2395 out_unref_obj:
2396         drm_gem_object_unreference(&obj->base);
2397         mutex_unlock(&dev->struct_mutex);
2398         return false;
2399 }
2400 
2401 static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2402                                  struct intel_plane_config *plane_config)
2403 {
2404         struct drm_device *dev = intel_crtc->base.dev;
2405         struct drm_crtc *c;
2406         struct intel_crtc *i;
2407         struct drm_i915_gem_object *obj;
2408 
2409         if (!intel_crtc->base.primary->fb)
2410                 return;
2411 
2412         if (intel_alloc_plane_obj(intel_crtc, plane_config))
2413                 return;
2414 
2415         kfree(intel_crtc->base.primary->fb);
2416         intel_crtc->base.primary->fb = NULL;
2417 
2418         /*
2419          * Failed to alloc the obj, check to see if we should share
2420          * an fb with another CRTC instead
2421          */
2422         for_each_crtc(dev, c) {
2423                 i = to_intel_crtc(c);
2424 
2425                 if (c == &intel_crtc->base)
2426                         continue;
2427 
2428                 if (!i->active)
2429                         continue;
2430 
2431                 obj = intel_fb_obj(c->primary->fb);
2432                 if (obj == NULL)
2433                         continue;
2434 
2435                 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2436                         drm_framebuffer_reference(c->primary->fb);
2437                         intel_crtc->base.primary->fb = c->primary->fb;
2438                         obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2439                         break;
2440                 }
2441         }
2442 }
2443 
2444 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2445                                       struct drm_framebuffer *fb,
2446                                       int x, int y)
2447 {
2448         struct drm_device *dev = crtc->dev;
2449         struct drm_i915_private *dev_priv = dev->dev_private;
2450         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2451         struct drm_i915_gem_object *obj;
2452         int plane = intel_crtc->plane;
2453         unsigned long linear_offset;
2454         u32 dspcntr;
2455         u32 reg = DSPCNTR(plane);
2456         int pixel_size;
2457 
2458         if (!intel_crtc->primary_enabled) {
2459                 I915_WRITE(reg, 0);
2460                 if (INTEL_INFO(dev)->gen >= 4)
2461                         I915_WRITE(DSPSURF(plane), 0);
2462                 else
2463                         I915_WRITE(DSPADDR(plane), 0);
2464                 POSTING_READ(reg);
2465                 return;
2466         }
2467 
2468         obj = intel_fb_obj(fb);
2469         if (WARN_ON(obj == NULL))
2470                 return;
2471 
2472         pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2473 
2474         dspcntr = DISPPLANE_GAMMA_ENABLE;
2475 
2476         dspcntr |= DISPLAY_PLANE_ENABLE;
2477 
2478         if (INTEL_INFO(dev)->gen < 4) {
2479                 if (intel_crtc->pipe == PIPE_B)
2480                         dspcntr |= DISPPLANE_SEL_PIPE_B;
2481 
2482                 /* pipesrc and dspsize control the size that is scaled from,
2483                  * which should always be the user's requested size.
2484                  */
2485                 I915_WRITE(DSPSIZE(plane),
2486                            ((intel_crtc->config.pipe_src_h - 1) << 16) |
2487                            (intel_crtc->config.pipe_src_w - 1));
2488                 I915_WRITE(DSPPOS(plane), 0);
2489         }
2490 
2491         switch (fb->pixel_format) {
2492         case DRM_FORMAT_C8:
2493                 dspcntr |= DISPPLANE_8BPP;
2494                 break;
2495         case DRM_FORMAT_XRGB1555:
2496         case DRM_FORMAT_ARGB1555:
2497                 dspcntr |= DISPPLANE_BGRX555;
2498                 break;
2499         case DRM_FORMAT_RGB565:
2500                 dspcntr |= DISPPLANE_BGRX565;
2501                 break;
2502         case DRM_FORMAT_XRGB8888:
2503         case DRM_FORMAT_ARGB8888:
2504                 dspcntr |= DISPPLANE_BGRX888;
2505                 break;
2506         case DRM_FORMAT_XBGR8888:
2507         case DRM_FORMAT_ABGR8888:
2508                 dspcntr |= DISPPLANE_RGBX888;
2509                 break;
2510         case DRM_FORMAT_XRGB2101010:
2511         case DRM_FORMAT_ARGB2101010:
2512                 dspcntr |= DISPPLANE_BGRX101010;
2513                 break;
2514         case DRM_FORMAT_XBGR2101010:
2515         case DRM_FORMAT_ABGR2101010:
2516                 dspcntr |= DISPPLANE_RGBX101010;
2517                 break;
2518         default:
2519                 BUG();
2520         }
2521 
2522         if (INTEL_INFO(dev)->gen >= 4 &&
2523             obj->tiling_mode != I915_TILING_NONE)
2524                 dspcntr |= DISPPLANE_TILED;
2525 
2526         if (IS_G4X(dev))
2527                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2528 
2529         linear_offset = y * fb->pitches[0] + x * pixel_size;
2530 
2531         if (INTEL_INFO(dev)->gen >= 4) {
2532                 intel_crtc->dspaddr_offset =
2533                         intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2534                                                        pixel_size,
2535                                                        fb->pitches[0]);
2536                 linear_offset -= intel_crtc->dspaddr_offset;
2537         } else {
2538                 intel_crtc->dspaddr_offset = linear_offset;
2539         }
2540 
2541         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2542                 dspcntr |= DISPPLANE_ROTATE_180;
2543 
2544                 x += (intel_crtc->config.pipe_src_w - 1);
2545                 y += (intel_crtc->config.pipe_src_h - 1);
2546 
2547                 /* Find the last pixel of the last line of the display
2548                  * data and add it to linear_offset. */
2549                 linear_offset +=
2550                         (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2551                         (intel_crtc->config.pipe_src_w - 1) * pixel_size;
2552         }
2553 
2554         I915_WRITE(reg, dspcntr);
2555 
2556         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2557                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2558                       fb->pitches[0]);
2559         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2560         if (INTEL_INFO(dev)->gen >= 4) {
2561                 I915_WRITE(DSPSURF(plane),
2562                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2563                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2564                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2565         } else
2566                 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2567         POSTING_READ(reg);
2568 }
2569 
2570 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2571                                           struct drm_framebuffer *fb,
2572                                           int x, int y)
2573 {
2574         struct drm_device *dev = crtc->dev;
2575         struct drm_i915_private *dev_priv = dev->dev_private;
2576         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2577         struct drm_i915_gem_object *obj;
2578         int plane = intel_crtc->plane;
2579         unsigned long linear_offset;
2580         u32 dspcntr;
2581         u32 reg = DSPCNTR(plane);
2582         int pixel_size;
2583 
2584         if (!intel_crtc->primary_enabled) {
2585                 I915_WRITE(reg, 0);
2586                 I915_WRITE(DSPSURF(plane), 0);
2587                 POSTING_READ(reg);
2588                 return;
2589         }
2590 
2591         obj = intel_fb_obj(fb);
2592         if (WARN_ON(obj == NULL))
2593                 return;
2594 
2595         pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2596 
2597         dspcntr = DISPPLANE_GAMMA_ENABLE;
2598 
2599         dspcntr |= DISPLAY_PLANE_ENABLE;
2600 
2601         if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2602                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2603 
2604         switch (fb->pixel_format) {
2605         case DRM_FORMAT_C8:
2606                 dspcntr |= DISPPLANE_8BPP;
2607                 break;
2608         case DRM_FORMAT_RGB565:
2609                 dspcntr |= DISPPLANE_BGRX565;
2610                 break;
2611         case DRM_FORMAT_XRGB8888:
2612         case DRM_FORMAT_ARGB8888:
2613                 dspcntr |= DISPPLANE_BGRX888;
2614                 break;
2615         case DRM_FORMAT_XBGR8888:
2616         case DRM_FORMAT_ABGR8888:
2617                 dspcntr |= DISPPLANE_RGBX888;
2618                 break;
2619         case DRM_FORMAT_XRGB2101010:
2620         case DRM_FORMAT_ARGB2101010:
2621                 dspcntr |= DISPPLANE_BGRX101010;
2622                 break;
2623         case DRM_FORMAT_XBGR2101010:
2624         case DRM_FORMAT_ABGR2101010:
2625                 dspcntr |= DISPPLANE_RGBX101010;
2626                 break;
2627         default:
2628                 BUG();
2629         }
2630 
2631         if (obj->tiling_mode != I915_TILING_NONE)
2632                 dspcntr |= DISPPLANE_TILED;
2633 
2634         if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2635                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2636 
2637         linear_offset = y * fb->pitches[0] + x * pixel_size;
2638         intel_crtc->dspaddr_offset =
2639                 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2640                                                pixel_size,
2641                                                fb->pitches[0]);
2642         linear_offset -= intel_crtc->dspaddr_offset;
2643         if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2644                 dspcntr |= DISPPLANE_ROTATE_180;
2645 
2646                 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2647                         x += (intel_crtc->config.pipe_src_w - 1);
2648                         y += (intel_crtc->config.pipe_src_h - 1);
2649 
2650                         /* Find the last pixel of the last line of the display
2651                          * data and add it to linear_offset. */
2652                         linear_offset +=
2653                                 (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2654                                 (intel_crtc->config.pipe_src_w - 1) * pixel_size;
2655                 }
2656         }
2657 
2658         I915_WRITE(reg, dspcntr);
2659 
2660         DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2661                       i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2662                       fb->pitches[0]);
2663         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2664         I915_WRITE(DSPSURF(plane),
2665                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2666         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2667                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2668         } else {
2669                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2670                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2671         }
2672         POSTING_READ(reg);
2673 }
2674 
2675 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2676 static int
2677 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2678                            int x, int y, enum mode_set_atomic state)
2679 {
2680         struct drm_device *dev = crtc->dev;
2681         struct drm_i915_private *dev_priv = dev->dev_private;
2682 
2683         if (dev_priv->display.disable_fbc)
2684                 dev_priv->display.disable_fbc(dev);
2685         intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
2686 
2687         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2688 
2689         return 0;
2690 }
2691 
2692 void intel_display_handle_reset(struct drm_device *dev)
2693 {
2694         struct drm_i915_private *dev_priv = dev->dev_private;
2695         struct drm_crtc *crtc;
2696 
2697         /*
2698          * Flips in the rings have been nuked by the reset,
2699          * so complete all pending flips so that user space
2700          * will get its events and not get stuck.
2701          *
2702          * Also update the base address of all primary
2703          * planes to the last fb to make sure we're
2704          * showing the correct fb after a reset.
2705          *
2706          * Need to make two loops over the crtcs so that we
2707          * don't try to grab a crtc mutex before the
2708          * pending_flip_queue really got woken up.
2709          */
2710 
2711         for_each_crtc(dev, crtc) {
2712                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2713                 enum plane plane = intel_crtc->plane;
2714 
2715                 intel_prepare_page_flip(dev, plane);
2716                 intel_finish_page_flip_plane(dev, plane);
2717         }
2718 
2719         for_each_crtc(dev, crtc) {
2720                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2721 
2722                 drm_modeset_lock(&crtc->mutex, NULL);
2723                 /*
2724                  * FIXME: Once we have proper support for primary planes (and
2725                  * disabling them without disabling the entire crtc) allow again
2726                  * a NULL crtc->primary->fb.
2727                  */
2728                 if (intel_crtc->active && crtc->primary->fb)
2729                         dev_priv->display.update_primary_plane(crtc,
2730                                                                crtc->primary->fb,
2731                                                                crtc->x,
2732                                                                crtc->y);
2733                 drm_modeset_unlock(&crtc->mutex);
2734         }
2735 }
2736 
2737 static int
2738 intel_finish_fb(struct drm_framebuffer *old_fb)
2739 {
2740         struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
2741         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2742         bool was_interruptible = dev_priv->mm.interruptible;
2743         int ret;
2744 
2745         /* Big Hammer, we also need to ensure that any pending
2746          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2747          * current scanout is retired before unpinning the old
2748          * framebuffer.
2749          *
2750          * This should only fail upon a hung GPU, in which case we
2751          * can safely continue.
2752          */
2753         dev_priv->mm.interruptible = false;
2754         ret = i915_gem_object_finish_gpu(obj);
2755         dev_priv->mm.interruptible = was_interruptible;
2756 
2757         return ret;
2758 }
2759 
2760 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2761 {
2762         struct drm_device *dev = crtc->dev;
2763         struct drm_i915_private *dev_priv = dev->dev_private;
2764         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2765         unsigned long flags;
2766         bool pending;
2767 
2768         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2769             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2770                 return false;
2771 
2772         spin_lock_irqsave(&dev->event_lock, flags);
2773         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2774         spin_unlock_irqrestore(&dev->event_lock, flags);
2775 
2776         return pending;
2777 }
2778 
2779 static int
2780 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2781                     struct drm_framebuffer *fb)
2782 {
2783         struct drm_device *dev = crtc->dev;
2784         struct drm_i915_private *dev_priv = dev->dev_private;
2785         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2786         enum pipe pipe = intel_crtc->pipe;
2787         struct drm_framebuffer *old_fb = crtc->primary->fb;
2788         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2789         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2790         int ret;
2791 
2792         if (intel_crtc_has_pending_flip(crtc)) {
2793                 DRM_ERROR("pipe is still busy with an old pageflip\n");
2794                 return -EBUSY;
2795         }
2796 
2797         /* no fb bound */
2798         if (!fb) {
2799                 DRM_ERROR("No FB bound\n");
2800                 return 0;
2801         }
2802 
2803         if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
2804                 DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2805                           plane_name(intel_crtc->plane),
2806                           INTEL_INFO(dev)->num_pipes);
2807                 return -EINVAL;
2808         }
2809 
2810         mutex_lock(&dev->struct_mutex);
2811         ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
2812         if (ret == 0)
2813                 i915_gem_track_fb(old_obj, obj,
2814                                   INTEL_FRONTBUFFER_PRIMARY(pipe));
2815         mutex_unlock(&dev->struct_mutex);
2816         if (ret != 0) {
2817                 DRM_ERROR("pin & fence failed\n");
2818                 return ret;
2819         }
2820 
2821         /*
2822          * Update pipe size and adjust fitter if needed: the reason for this is
2823          * that in compute_mode_changes we check the native mode (not the pfit
2824          * mode) to see if we can flip rather than do a full mode set. In the
2825          * fastboot case, we'll flip, but if we don't update the pipesrc and
2826          * pfit state, we'll end up with a big fb scanned out into the wrong
2827          * sized surface.
2828          *
2829          * To fix this properly, we need to hoist the checks up into
2830          * compute_mode_changes (or above), check the actual pfit state and
2831          * whether the platform allows pfit disable with pipe active, and only
2832          * then update the pipesrc and pfit state, even on the flip path.
2833          */
2834         if (i915.fastboot) {
2835                 const struct drm_display_mode *adjusted_mode =
2836                         &intel_crtc->config.adjusted_mode;
2837 
2838                 I915_WRITE(PIPESRC(intel_crtc->pipe),
2839                            ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2840                            (adjusted_mode->crtc_vdisplay - 1));
2841                 if (!intel_crtc->config.pch_pfit.enabled &&
2842                     (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2843                      intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2844                         I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2845                         I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2846                         I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2847                 }
2848                 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2849                 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2850         }
2851 
2852         dev_priv->display.update_primary_plane(crtc, fb, x, y);
2853 
2854         if (intel_crtc->active)
2855                 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
2856 
2857         crtc->primary->fb = fb;
2858         crtc->x = x;
2859         crtc->y = y;
2860 
2861         if (old_fb) {
2862                 if (intel_crtc->active && old_fb != fb)
2863                         intel_wait_for_vblank(dev, intel_crtc->pipe);
2864                 mutex_lock(&dev->struct_mutex);
2865                 intel_unpin_fb_obj(old_obj);
2866                 mutex_unlock(&dev->struct_mutex);
2867         }
2868 
2869         mutex_lock(&dev->struct_mutex);
2870         intel_update_fbc(dev);
2871         mutex_unlock(&dev->struct_mutex);
2872 
2873         return 0;
2874 }
2875 
2876 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2877 {
2878         struct drm_device *dev = crtc->dev;
2879         struct drm_i915_private *dev_priv = dev->dev_private;
2880         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2881         int pipe = intel_crtc->pipe;
2882         u32 reg, temp;
2883 
2884         /* enable normal train */
2885         reg = FDI_TX_CTL(pipe);
2886         temp = I915_READ(reg);
2887         if (IS_IVYBRIDGE(dev)) {
2888                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2889                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2890         } else {
2891                 temp &= ~FDI_LINK_TRAIN_NONE;
2892                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2893         }
2894         I915_WRITE(reg, temp);
2895 
2896         reg = FDI_RX_CTL(pipe);
2897         temp = I915_READ(reg);
2898         if (HAS_PCH_CPT(dev)) {
2899                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2900                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2901         } else {
2902                 temp &= ~FDI_LINK_TRAIN_NONE;
2903                 temp |= FDI_LINK_TRAIN_NONE;
2904         }
2905         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2906 
2907         /* wait one idle pattern time */
2908         POSTING_READ(reg);
2909         udelay(1000);
2910 
2911         /* IVB wants error correction enabled */
2912         if (IS_IVYBRIDGE(dev))
2913                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2914                            FDI_FE_ERRC_ENABLE);
2915 }
2916 
2917 static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
2918 {
2919         return crtc->base.enabled && crtc->active &&
2920                 crtc->config.has_pch_encoder;
2921 }
2922 
2923 static void ivb_modeset_global_resources(struct drm_device *dev)
2924 {
2925         struct drm_i915_private *dev_priv = dev->dev_private;
2926         struct intel_crtc *pipe_B_crtc =
2927                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2928         struct intel_crtc *pipe_C_crtc =
2929                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2930         uint32_t temp;
2931 
2932         /*
2933          * When everything is off, disable fdi C so that we can enable fdi B
2934          * with all lanes. Note that we don't care about enabled pipes without
2935          * an enabled pch encoder.
2936          */
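             /*
              * A rough sketch of the lane sharing assumed here: the PCH FDI
              * receivers for pipes B and C share four lanes.  With
              * FDI_BC_BIFURCATION_SELECT set they are split 2/2 between B and
              * C; with it cleared, FDI B may use all four lanes, which is only
              * safe once FDI C rx is disabled, as done below.
              */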
2937         if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2938             !pipe_has_enabled_pch(pipe_C_crtc)) {
2939                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2940                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2941 
2942                 temp = I915_READ(SOUTH_CHICKEN1);
2943                 temp &= ~FDI_BC_BIFURCATION_SELECT;
2944                 DRM_DEBUG_KMS("disabling fdi C rx\n");
2945                 I915_WRITE(SOUTH_CHICKEN1, temp);
2946         }
2947 }
2948 
2949 /* The FDI link training functions for ILK/Ibexpeak. */
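     /*
      * A sketch of the sequence implemented below, as read from the code: the
      * symbol/bit lock interrupt bits are unmasked in FDI_RX_IMR, the CPU FDI
      * transmitter and PCH FDI receiver are enabled with training pattern 1,
      * FDI_RX_IIR is polled for FDI_RX_BIT_LOCK, both sides are switched to
      * training pattern 2 and polled for FDI_RX_SYMBOL_LOCK, and
      * intel_fdi_normal_train() later moves the link to the normal pattern.
      */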
2950 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2951 {
2952         struct drm_device *dev = crtc->dev;
2953         struct drm_i915_private *dev_priv = dev->dev_private;
2954         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2955         int pipe = intel_crtc->pipe;
2956         u32 reg, temp, tries;
2957 
2958         /* FDI needs bits from pipe first */
2959         assert_pipe_enabled(dev_priv, pipe);
2960 
2961         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2962            for train result */
2963         reg = FDI_RX_IMR(pipe);
2964         temp = I915_READ(reg);
2965         temp &= ~FDI_RX_SYMBOL_LOCK;
2966         temp &= ~FDI_RX_BIT_LOCK;
2967         I915_WRITE(reg, temp);
2968         I915_READ(reg);
2969         udelay(150);
2970 
2971         /* enable CPU FDI TX and PCH FDI RX */
2972         reg = FDI_TX_CTL(pipe);
2973         temp = I915_READ(reg);
2974         temp &= ~FDI_DP_PORT_WIDTH_MASK;
2975         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2976         temp &= ~FDI_LINK_TRAIN_NONE;
2977         temp |= FDI_LINK_TRAIN_PATTERN_1;
2978         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2979 
2980         reg = FDI_RX_CTL(pipe);
2981         temp = I915_READ(reg);
2982         temp &= ~FDI_LINK_TRAIN_NONE;
2983         temp |= FDI_LINK_TRAIN_PATTERN_1;
2984         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2985 
2986         POSTING_READ(reg);
2987         udelay(150);
2988 
2989         /* Ironlake workaround, enable clock pointer after FDI enable */
2990         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2991         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2992                    FDI_RX_PHASE_SYNC_POINTER_EN);
2993 
2994         reg = FDI_RX_IIR(pipe);
2995         for (tries = 0; tries < 5; tries++) {
2996                 temp = I915_READ(reg);
2997                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2998 
2999                 if ((temp & FDI_RX_BIT_LOCK)) {
3000                         DRM_DEBUG_KMS("FDI train 1 done.\n");
3001                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3002                         break;
3003                 }
3004         }
3005         if (tries == 5)
3006                 DRM_ERROR("FDI train 1 fail!\n");
3007 
3008         /* Train 2 */
3009         reg = FDI_TX_CTL(pipe);
3010         temp = I915_READ(reg);
3011         temp &= ~FDI_LINK_TRAIN_NONE;
3012         temp |= FDI_LINK_TRAIN_PATTERN_2;
3013         I915_WRITE(reg, temp);
3014 
3015         reg = FDI_RX_CTL(pipe);
3016         temp = I915_READ(reg);
3017         temp &= ~FDI_LINK_TRAIN_NONE;
3018         temp |= FDI_LINK_TRAIN_PATTERN_2;
3019         I915_WRITE(reg, temp);
3020 
3021         POSTING_READ(reg);
3022         udelay(150);
3023 
3024         reg = FDI_RX_IIR(pipe);
3025         for (tries = 0; tries < 5; tries++) {
3026                 temp = I915_READ(reg);
3027                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3028 
3029                 if (temp & FDI_RX_SYMBOL_LOCK) {
3030                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3031                         DRM_DEBUG_KMS("FDI train 2 done.\n");
3032                         break;
3033                 }
3034         }
3035         if (tries == 5)
3036                 DRM_ERROR("FDI train 2 fail!\n");
3037 
3038         DRM_DEBUG_KMS("FDI train done\n");
3039 
3040 }
3041 
3042 static const int snb_b_fdi_train_param[] = {
3043         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3044         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3045         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3046         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3047 };
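
     /*
      * Going by their names, these are the voltage-swing / pre-emphasis
      * combinations (400mV/0dB, 400mV/6dB, 600mV/3.5dB, 800mV/0dB) that the
      * SNB and IVB training loops below step through until the receiver
      * reports bit and symbol lock.
      */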
3048 
3049 /* The FDI link training functions for SNB/Cougarpoint. */
3050 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3051 {
3052         struct drm_device *dev = crtc->dev;
3053         struct drm_i915_private *dev_priv = dev->dev_private;
3054         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3055         int pipe = intel_crtc->pipe;
3056         u32 reg, temp, i, retry;
3057 
3058         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3059            for train result */
3060         reg = FDI_RX_IMR(pipe);
3061         temp = I915_READ(reg);
3062         temp &= ~FDI_RX_SYMBOL_LOCK;
3063         temp &= ~FDI_RX_BIT_LOCK;
3064         I915_WRITE(reg, temp);
3065 
3066         POSTING_READ(reg);
3067         udelay(150);
3068 
3069         /* enable CPU FDI TX and PCH FDI RX */
3070         reg = FDI_TX_CTL(pipe);
3071         temp = I915_READ(reg);
3072         temp &= ~FDI_DP_PORT_WIDTH_MASK;
3073         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3074         temp &= ~FDI_LINK_TRAIN_NONE;
3075         temp |= FDI_LINK_TRAIN_PATTERN_1;
3076         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3077         /* SNB-B */
3078         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3079         I915_WRITE(reg, temp | FDI_TX_ENABLE);
3080 
3081         I915_WRITE(FDI_RX_MISC(pipe),
3082                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3083 
3084         reg = FDI_RX_CTL(pipe);
3085         temp = I915_READ(reg);
3086         if (HAS_PCH_CPT(dev)) {
3087                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3088                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3089         } else {
3090                 temp &= ~FDI_LINK_TRAIN_NONE;
3091                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3092         }
3093         I915_WRITE(reg, temp | FDI_RX_ENABLE);
3094 
3095         POSTING_READ(reg);
3096         udelay(150);
3097 
3098         for (i = 0; i < 4; i++) {
3099                 reg = FDI_TX_CTL(pipe);
3100                 temp = I915_READ(reg);
3101                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3102                 temp |= snb_b_fdi_train_param[i];
3103                 I915_WRITE(reg, temp);
3104 
3105                 POSTING_READ(reg);
3106                 udelay(500);
3107 
3108                 for (retry = 0; retry < 5; retry++) {
3109                         reg = FDI_RX_IIR(pipe);
3110                         temp = I915_READ(reg);
3111                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3112                         if (temp & FDI_RX_BIT_LOCK) {
3113                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3114                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
3115                                 break;
3116                         }
3117                         udelay(50);
3118                 }
3119                 if (retry < 5)
3120                         break;
3121         }
3122         if (i == 4)
3123                 DRM_ERROR("FDI train 1 fail!\n");
3124 
3125         /* Train 2 */
3126         reg = FDI_TX_CTL(pipe);
3127         temp = I915_READ(reg);
3128         temp &= ~FDI_LINK_TRAIN_NONE;
3129         temp |= FDI_LINK_TRAIN_PATTERN_2;
3130         if (IS_GEN6(dev)) {
3131                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3132                 /* SNB-B */
3133                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3134         }
3135         I915_WRITE(reg, temp);
3136 
3137         reg = FDI_RX_CTL(pipe);
3138         temp = I915_READ(reg);
3139         if (HAS_PCH_CPT(dev)) {
3140                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3141                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3142         } else {
3143                 temp &= ~FDI_LINK_TRAIN_NONE;
3144                 temp |= FDI_LINK_TRAIN_PATTERN_2;
3145         }
3146         I915_WRITE(reg, temp);
3147 
3148         POSTING_READ(reg);
3149         udelay(150);
3150 
3151         for (i = 0; i < 4; i++) {
3152                 reg = FDI_TX_CTL(pipe);
3153                 temp = I915_READ(reg);
3154                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3155                 temp |= snb_b_fdi_train_param[i];
3156                 I915_WRITE(reg, temp);
3157 
3158                 POSTING_READ(reg);
3159                 udelay(500);
3160 
3161                 for (retry = 0; retry < 5; retry++) {
3162                         reg = FDI_RX_IIR(pipe);
3163                         temp = I915_READ(reg);
3164                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3165                         if (temp & FDI_RX_SYMBOL_LOCK) {
3166                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3167                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
3168                                 break;
3169                         }
3170                         udelay(50);
3171                 }
3172                 if (retry < 5)
3173                         break;
3174         }
3175         if (i == 4)
3176                 DRM_ERROR("FDI train 2 fail!\n");
3177 
3178         DRM_DEBUG_KMS("FDI train done.\n");
3179 }
3180 
3181 /* Manual link training for Ivy Bridge A0 parts */
3182 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3183 {
3184         struct drm_device *dev = crtc->dev;
3185         struct drm_i915_private *dev_priv = dev->dev_private;
3186         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3187         int pipe = intel_crtc->pipe;
3188         u32 reg, temp, i, j;
3189 
3190         /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3191            for train result */
3192         reg = FDI_RX_IMR(pipe);
3193         temp = I915_READ(reg);
3194         temp &= ~FDI_RX_SYMBOL_LOCK;
3195         temp &= ~FDI_RX_BIT_LOCK;
3196         I915_WRITE(reg, temp);
3197 
3198         POSTING_READ(reg);
3199         udelay(150);
3200 
3201         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3202                       I915_READ(FDI_RX_IIR(pipe)));
3203 
3204         /* Try each vswing and preemphasis setting twice before moving on */
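             /* j runs from 0 to 2 * ARRAY_SIZE - 1, so snb_b_fdi_train_param[j / 2]
              * below gives each of the four entries two attempts. */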
3205         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3206                 /* disable first in case we need to retry */
3207                 reg = FDI_TX_CTL(pipe);
3208                 temp = I915_READ(reg);
3209                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3210                 temp &= ~FDI_TX_ENABLE;
3211                 I915_WRITE(reg, temp);
3212 
3213                 reg = FDI_RX_CTL(pipe);
3214                 temp = I915_READ(reg);
3215                 temp &= ~FDI_LINK_TRAIN_AUTO;
3216                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3217                 temp &= ~FDI_RX_ENABLE;
3218                 I915_WRITE(reg, temp);
3219 
3220                 /* enable CPU FDI TX and PCH FDI RX */
3221                 reg = FDI_TX_CTL(pipe);
3222                 temp = I915_READ(reg);
3223                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3224                 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3225                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3226                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3227                 temp |= snb_b_fdi_train_param[j/2];
3228                 temp |= FDI_COMPOSITE_SYNC;
3229                 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3230 
3231                 I915_WRITE(FDI_RX_MISC(pipe),
3232                            FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3233 
3234                 reg = FDI_RX_CTL(pipe);
3235                 temp = I915_READ(reg);
3236                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3237                 temp |= FDI_COMPOSITE_SYNC;
3238                 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3239 
3240                 POSTING_READ(reg);
3241                 udelay(1); /* should be 0.5us */
3242 
3243                 for (i = 0; i < 4; i++) {
3244                         reg = FDI_RX_IIR(pipe);
3245                         temp = I915_READ(reg);
3246                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3247 
3248                         if (temp & FDI_RX_BIT_LOCK ||
3249                             (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3250                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3251                                 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3252                                               i);
3253                                 break;
3254                         }
3255                         udelay(1); /* should be 0.5us */
3256                 }
3257                 if (i == 4) {
3258                         DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3259                         continue;
3260                 }
3261 
3262                 /* Train 2 */
3263                 reg = FDI_TX_CTL(pipe);
3264                 temp = I915_READ(reg);
3265                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3266                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3267                 I915_WRITE(reg, temp);
3268 
3269                 reg = FDI_RX_CTL(pipe);
3270                 temp = I915_READ(reg);
3271                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3272                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3273                 I915_WRITE(reg, temp);
3274 
3275                 POSTING_READ(reg);
3276                 udelay(2); /* should be 1.5us */
3277 
3278                 for (i = 0; i < 4; i++) {
3279                         reg = FDI_RX_IIR(pipe);
3280                         temp = I915_READ(reg);
3281                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3282 
3283                         if (temp & FDI_RX_SYMBOL_LOCK ||
3284                             (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3285                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3286                                 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3287                                               i);
3288                                 goto train_done;
3289                         }
3290                         udelay(2); /* should be 1.5us */
3291                 }
3292                 if (i == 4)
3293                         DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3294         }
3295 
3296 train_done:
3297         DRM_DEBUG_KMS("FDI train done.\n");
3298 }
3299 
3300 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3301 {
3302         struct drm_device *dev = intel_crtc->base.dev;
3303         struct drm_i915_private *dev_priv = dev->dev_private;
3304         int pipe = intel_crtc->pipe;
3305         u32 reg, temp;
3306 
3307 
3308         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3309         reg = FDI_RX_CTL(pipe);
3310         temp = I915_READ(reg);
3311         temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3312         temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
3313         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3314         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3315 
3316         POSTING_READ(reg);
3317         udelay(200);
3318 
3319         /* Switch from Rawclk to PCDclk */
3320         temp = I915_READ(reg);
3321         I915_WRITE(reg, temp | FDI_PCDCLK);
3322 
3323         POSTING_READ(reg);
3324         udelay(200);
3325 
3326         /* Enable CPU FDI TX PLL, always on for Ironlake */
3327         reg = FDI_TX_CTL(pipe);
3328         temp = I915_READ(reg);
3329         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3330                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3331 
3332                 POSTING_READ(reg);
3333                 udelay(100);
3334         }
3335 }
3336 
3337 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3338 {
3339         struct drm_device *dev = intel_crtc->base.dev;
3340         struct drm_i915_private *dev_priv = dev->dev_private;
3341         int pipe = intel_crtc->pipe;
3342         u32 reg, temp;
3343 
3344         /* Switch from PCDclk to Rawclk */
3345         reg = FDI_RX_CTL(pipe);
3346         temp = I915_READ(reg);
3347         I915_WRITE(reg, temp & ~FDI_PCDCLK);
3348 
3349         /* Disable CPU FDI TX PLL */
3350         reg = FDI_TX_CTL(pipe);
3351         temp = I915_READ(reg);
3352         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3353 
3354         POSTING_READ(reg);
3355         udelay(100);
3356 
3357         reg = FDI_RX_CTL(pipe);
3358         temp = I915_READ(reg);
3359         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3360 
3361         /* Wait for the clocks to turn off. */
3362         POSTING_READ(reg);
3363         udelay(100);
3364 }
3365 
3366 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3367 {
3368         struct drm_device *dev = crtc->dev;
3369         struct drm_i915_private *dev_priv = dev->dev_private;
3370         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3371         int pipe = intel_crtc->pipe;
3372         u32 reg, temp;
3373 
3374         /* disable CPU FDI tx and PCH FDI rx */
3375         reg = FDI_TX_CTL(pipe);
3376         temp = I915_READ(reg);
3377         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3378         POSTING_READ(reg);
3379 
3380         reg = FDI_RX_CTL(pipe);
3381         temp = I915_READ(reg);
3382         temp &= ~(0x7 << 16);
3383         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3384         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3385 
3386         POSTING_READ(reg);
3387         udelay(100);
3388 
3389         /* Ironlake workaround, disable clock pointer after downing FDI */
3390         if (HAS_PCH_IBX(dev))
3391                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3392 
3393         /* still set train pattern 1 */
3394         reg = FDI_TX_CTL(pipe);
3395         temp = I915_READ(reg);
3396         temp &= ~FDI_LINK_TRAIN_NONE;
3397         temp |= FDI_LINK_TRAIN_PATTERN_1;
3398         I915_WRITE(reg, temp);
3399 
3400         reg = FDI_RX_CTL(pipe);
3401         temp = I915_READ(reg);
3402         if (HAS_PCH_CPT(dev)) {
3403                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3404                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3405         } else {
3406                 temp &= ~FDI_LINK_TRAIN_NONE;
3407                 temp |= FDI_LINK_TRAIN_PATTERN_1;
3408         }
3409         /* BPC in FDI rx is consistent with that in PIPECONF */
3410         temp &= ~(0x07 << 16);
3411         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3412         I915_WRITE(reg, temp);
3413 
3414         POSTING_READ(reg);
3415         udelay(100);
3416 }
3417 
3418 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3419 {
3420         struct intel_crtc *crtc;
3421 
3422         /* Note that we don't need to be called with mode_config.lock here
3423          * as our list of CRTC objects is static for the lifetime of the
3424          * device and so cannot disappear as we iterate. Similarly, we can
3425          * happily treat the predicates as racy, atomic checks as userspace
3426          * cannot claim and pin a new fb without at least acquiring the
3427          * struct_mutex and so serialising with us.
3428          */
3429         for_each_intel_crtc(dev, crtc) {
3430                 if (atomic_read(&crtc->unpin_work_count) == 0)
3431                         continue;
3432 
3433                 if (crtc->unpin_work)
3434                         intel_wait_for_vblank(dev, crtc->pipe);
3435 
3436                 return true;
3437         }
3438 
3439         return false;
3440 }
3441 
3442 static void page_flip_completed(struct intel_crtc *intel_crtc)
3443 {
3444         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3445         struct intel_unpin_work *work = intel_crtc->unpin_work;
3446 
3447         /* ensure that the unpin work is consistent wrt ->pending. */
3448         smp_rmb();
3449         intel_crtc->unpin_work = NULL;
3450 
3451         if (work->event)
3452                 drm_send_vblank_event(intel_crtc->base.dev,
3453                                       intel_crtc->pipe,
3454                                       work->event);
3455 
3456         drm_crtc_vblank_put(&intel_crtc->base);
3457 
3458         wake_up_all(&dev_priv->pending_flip_queue);
3459         queue_work(dev_priv->wq, &work->work);
3460 
3461         trace_i915_flip_complete(intel_crtc->plane,
3462                                  work->pending_flip_obj);
3463 }
3464 
3465 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3466 {
3467         struct drm_device *dev = crtc->dev;
3468         struct drm_i915_private *dev_priv = dev->dev_private;
3469 
3470         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3471         if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
3472                                        !intel_crtc_has_pending_flip(crtc),
3473                                        60*HZ) == 0)) {
3474                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3475                 unsigned long flags;
3476 
3477                 spin_lock_irqsave(&dev->event_lock, flags);
3478                 if (intel_crtc->unpin_work) {
3479                         WARN_ONCE(1, "Removing stuck page flip\n");
3480                         page_flip_completed(intel_crtc);
3481                 }
3482                 spin_unlock_irqrestore(&dev->event_lock, flags);
3483         }
3484 
3485         if (crtc->primary->fb) {
3486                 mutex_lock(&dev->struct_mutex);
3487                 intel_finish_fb(crtc->primary->fb);
3488                 mutex_unlock(&dev->struct_mutex);
3489         }
3490 }
3491 
3492 /* Program iCLKIP clock to the desired frequency */
3493 static void lpt_program_iclkip(struct drm_crtc *crtc)
3494 {
3495         struct drm_device *dev = crtc->dev;
3496         struct drm_i915_private *dev_priv = dev->dev_private;
3497         int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
3498         u32 divsel, phaseinc, auxdiv, phasedir = 0;
3499         u32 temp;
3500 
3501         mutex_lock(&dev_priv->dpio_lock);
3502 
3503         /* It is necessary to ungate the pixclk gate prior to programming
3504          * the divisors, and gate it back when it is done.
3505          */
3506         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3507 
3508         /* Disable SSCCTL */
3509         intel_sbi_write(dev_priv, SBI_SSCCTL6,
3510                         intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
3511                                 SBI_SSCCTL_DISABLE,
3512                         SBI_ICLK);
3513 
3514         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3515         if (clock == 20000) {
3516                 auxdiv = 1;
3517                 divsel = 0x41;
3518                 phaseinc = 0x20;
3519         } else {
3520                 /* The iCLK virtual clock root frequency is in MHz,
3521                  * but the adjusted_mode->crtc_clock is in KHz. To get the
3522                  * divisors, it is necessary to divide one by the other, so we
3523                  * convert the virtual clock precision to KHz here for higher
3524                  * precision.
3525                  */
3526                 u32 iclk_virtual_root_freq = 172800 * 1000;
3527                 u32 iclk_pi_range = 64;
3528                 u32 desired_divisor, msb_divisor_value, pi_value;
3529 
3530                 desired_divisor = (iclk_virtual_root_freq / clock);
3531                 msb_divisor_value = desired_divisor / iclk_pi_range;
3532                 pi_value = desired_divisor % iclk_pi_range;
3533 
3534                 auxdiv = 0;
3535                 divsel = msb_divisor_value - 2;
3536                 phaseinc = pi_value;
3537         }
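
             /*
              * Worked example with illustrative numbers: for a 108000 KHz
              * pixel clock, desired_divisor = 172800000 / 108000 = 1600,
              * msb_divisor_value = 1600 / 64 = 25 and pi_value = 0, giving
              * auxdiv = 0, divsel = 23 and phaseinc = 0.
              */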
3538 
3539         /* This should not happen with any sane values */
3540         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3541                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3542         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3543                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3544 
3545         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3546                         clock,
3547                         auxdiv,
3548                         divsel,
3549                         phasedir,
3550                         phaseinc);
3551 
3552         /* Program SSCDIVINTPHASE6 */
3553         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3554         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3555         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3556         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3557         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3558         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3559         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3560         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3561 
3562         /* Program SSCAUXDIV */
3563         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3564         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3565         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3566         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3567 
3568         /* Enable modulator and associated divider */
3569         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3570         temp &= ~SBI_SSCCTL_DISABLE;
3571         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3572 
3573         /* Wait for initialization time */
3574         udelay(24);
3575 
3576         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3577 
3578         mutex_unlock(&dev_priv->dpio_lock);
3579 }
3580 
3581 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3582                                                 enum pipe pch_transcoder)
3583 {
3584         struct drm_device *dev = crtc->base.dev;
3585         struct drm_i915_private *dev_priv = dev->dev_private;
3586         enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3587 
3588         I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3589                    I915_READ(HTOTAL(cpu_transcoder)));
3590         I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3591                    I915_READ(HBLANK(cpu_transcoder)));
3592         I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3593                    I915_READ(HSYNC(cpu_transcoder)));
3594 
3595         I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3596                    I915_READ(VTOTAL(cpu_transcoder)));
3597         I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3598                    I915_READ(VBLANK(cpu_transcoder)));
3599         I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3600                    I915_READ(VSYNC(cpu_transcoder)));
3601         I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3602                    I915_READ(VSYNCSHIFT(cpu_transcoder)));
3603 }
3604 
3605 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
3606 {
3607         struct drm_i915_private *dev_priv = dev->dev_private;
3608         uint32_t temp;
3609 
3610         temp = I915_READ(SOUTH_CHICKEN1);
3611         if (temp & FDI_BC_BIFURCATION_SELECT)
3612                 return;
3613 
3614         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3615         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3616 
3617         temp |= FDI_BC_BIFURCATION_SELECT;
3618         DRM_DEBUG_KMS("enabling fdi C rx\n");
3619         I915_WRITE(SOUTH_CHICKEN1, temp);
3620         POSTING_READ(SOUTH_CHICKEN1);
3621 }
3622 
3623 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3624 {
3625         struct drm_device *dev = intel_crtc->base.dev;
3626         struct drm_i915_private *dev_priv = dev->dev_private;
3627 
3628         switch (intel_crtc->pipe) {
3629         case PIPE_A:
3630                 break;
3631         case PIPE_B:
3632                 if (intel_crtc->config.fdi_lanes > 2)
3633                         WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
3634                 else
3635                         cpt_enable_fdi_bc_bifurcation(dev);
3636 
3637                 break;
3638         case PIPE_C:
3639                 cpt_enable_fdi_bc_bifurcation(dev);
3640 
3641                 break;
3642         default:
3643                 BUG();
3644         }
3645 }
3646 
3647 /*
3648  * Enable PCH resources required for PCH ports:
3649  *   - PCH PLLs
3650  *   - FDI training & RX/TX
3651  *   - update transcoder timings
3652  *   - DP transcoding bits
3653  *   - transcoder
3654  */
3655 static void ironlake_pch_enable(struct drm_crtc *crtc)
3656 {
3657         struct drm_device *dev = crtc->dev;
3658         struct drm_i915_private *dev_priv = dev->dev_private;
3659         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3660         int pipe = intel_crtc->pipe;
3661         u32 reg, temp;
3662 
3663         assert_pch_transcoder_disabled(dev_priv, pipe);
3664 
3665         if (IS_IVYBRIDGE(dev))
3666                 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
3667 
3668         /* Write the TU size bits before fdi link training, so that error
3669          * detection works. */
3670         I915_WRITE(FDI_RX_TUSIZE1(pipe),
3671                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3672 
3673         /* For PCH output, training FDI link */
3674         dev_priv->display.fdi_link_train(crtc);
3675 
3676         /* We need to program the right clock selection before writing the pixel
3677          * multiplier into the DPLL. */
3678         if (HAS_PCH_CPT(dev)) {
3679                 u32 sel;
3680 
3681                 temp = I915_READ(PCH_DPLL_SEL);
3682                 temp |= TRANS_DPLL_ENABLE(pipe);
3683                 sel = TRANS_DPLLB_SEL(pipe);
3684                 if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
3685                         temp |= sel;
3686                 else
3687                         temp &= ~sel;
3688                 I915_WRITE(PCH_DPLL_SEL, temp);
3689         }
3690 
3691         /* XXX: pch pll's can be enabled any time before we enable the PCH
3692          * transcoder, and we actually should do this to not upset any PCH
3693          * transcoder that already uses the clock when we share it.
3694          *
3695          * Note that enable_shared_dpll tries to do the right thing, but
3696          * get_shared_dpll unconditionally resets the pll - we need that to have
3697          * the right LVDS enable sequence. */
3698         intel_enable_shared_dpll(intel_crtc);
3699 
3700         /* set transcoder timing, panel must allow it */
3701         assert_panel_unlocked(dev_priv, pipe);
3702         ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
3703 
3704         intel_fdi_normal_train(crtc);
3705 
3706         /* For PCH DP, enable TRANS_DP_CTL */
3707         if (HAS_PCH_CPT(dev) &&
3708             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3709              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3710                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3711                 reg = TRANS_DP_CTL(pipe);
3712                 temp = I915_READ(reg);
3713                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3714                           TRANS_DP_SYNC_MASK |
3715                           TRANS_DP_BPC_MASK);
3716                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3717                          TRANS_DP_ENH_FRAMING);
3718                 temp |= bpc << 9; /* same format but at 11:9 */
3719 
3720                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3721                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3722                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3723                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3724 
3725                 switch (intel_trans_dp_port_sel(crtc)) {
3726                 case PCH_DP_B:
3727                         temp |= TRANS_DP_PORT_SEL_B;
3728                         break;
3729                 case PCH_DP_C:
3730                         temp |= TRANS_DP_PORT_SEL_C;
3731                         break;
3732                 case PCH_DP_D:
3733                         temp |= TRANS_DP_PORT_SEL_D;
3734                         break;
3735                 default:
3736                         BUG();
3737                 }
3738 
3739                 I915_WRITE(reg, temp);
3740         }
3741 
3742         ironlake_enable_pch_transcoder(dev_priv, pipe);
3743 }
3744 
3745 static void lpt_pch_enable(struct drm_crtc *crtc)
3746 {
3747         struct drm_device *dev = crtc->dev;
3748         struct drm_i915_private *dev_priv = dev->dev_private;
3749         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3750         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3751 
3752         assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
3753 
3754         lpt_program_iclkip(crtc);
3755 
3756         /* Set transcoder timing. */
3757         ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
3758 
3759         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3760 }
3761 
3762 void intel_put_shared_dpll(struct intel_crtc *crtc)
3763 {
3764         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3765 
3766         if (pll == NULL)
3767                 return;
3768 
3769         if (pll->refcount == 0) {
3770                 WARN(1, "bad %s refcount\n", pll->name);
3771                 return;
3772         }
3773 
3774         if (--pll->refcount == 0) {
3775                 WARN_ON(pll->on);
3776                 WARN_ON(pll->active);
3777         }
3778 
3779         crtc->config.shared_dpll = DPLL_ID_PRIVATE;
3780 }
3781 
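     /*
      * Selection order, as implemented below: on IBX the PLL is fixed by the
      * pipe; otherwise the code first tries to share an in-use PLL whose
      * dpll_hw_state matches this crtc's, then falls back to any unused PLL
      * (refcount == 0), and returns NULL if none is available.
      */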
3782 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3783 {
3784         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3785         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3786         enum intel_dpll_id i;
3787 
3788         if (pll) {
3789                 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3790                               crtc->base.base.id, pll->name);
3791                 intel_put_shared_dpll(crtc);
3792         }
3793 
3794         if (HAS_PCH_IBX(dev_priv->dev)) {
3795                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3796                 i = (enum intel_dpll_id) crtc->pipe;
3797                 pll = &dev_priv->shared_dplls[i];
3798 
3799                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3800                               crtc->base.base.id, pll->name);
3801 
3802                 WARN_ON(pll->refcount);
3803 
3804                 goto found;
3805         }
3806 
3807         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3808                 pll = &dev_priv->shared_dplls[i];
3809 
3810                 /* Only want to check enabled timings first */
3811                 if (pll->refcount == 0)
3812                         continue;
3813 
3814                 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3815                            sizeof(pll->hw_state)) == 0) {
3816                         DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
3817                                       crtc->base.base.id,
3818                                       pll->name, pll->refcount, pll->active);
3819 
3820                         goto found;
3821                 }
3822         }
3823 
3824         /* Ok no matching timings, maybe there's a free one? */
3825         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3826                 pll = &dev_priv->shared_dplls[i];
3827                 if (pll->refcount == 0) {
3828                         DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3829                                       crtc->base.base.id, pll->name);
3830                         goto found;
3831                 }
3832         }
3833 
3834         return NULL;
3835 
3836 found:
3837         if (pll->refcount == 0)
3838                 pll->hw_state = crtc->config.dpll_hw_state;
3839 
3840         crtc->config.shared_dpll = i;
3841         DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3842                          pipe_name(crtc->pipe));
3843 
3844         pll->refcount++;
3845 
3846         return pll;
3847 }
3848 
3849 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3850 {
3851         struct drm_i915_private *dev_priv = dev->dev_private;
3852         int dslreg = PIPEDSL(pipe);
3853         u32 temp;
3854 
3855         temp = I915_READ(dslreg);
3856         udelay(500);
3857         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3858                 if (wait_for(I915_READ(dslreg) != temp, 5))
3859                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
3860         }
3861 }
3862 
3863 static void ironlake_pfit_enable(struct intel_crtc *crtc)
3864 {
3865         struct drm_device *dev = crtc->base.dev;
3866         struct drm_i915_private *dev_priv = dev->dev_private;
3867         int pipe = crtc->pipe;
3868 
3869         if (crtc->config.pch_pfit.enabled) {
3870                 /* Force use of hard-coded filter coefficients
3871                  * as some pre-programmed values are broken,
3872                  * e.g. x201.
3873                  */
3874                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3875                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3876                                                  PF_PIPE_SEL_IVB(pipe));
3877                 else
3878                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3879                 I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3880                 I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
3881         }
3882 }
3883 
3884 static void intel_enable_planes(struct drm_crtc *crtc)
3885 {
3886         struct drm_device *dev = crtc->dev;
3887         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3888         struct drm_plane *plane;
3889         struct intel_plane *intel_plane;
3890 
3891         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3892                 intel_plane = to_intel_plane(plane);
3893                 if (intel_plane->pipe == pipe)
3894                         intel_plane_restore(&intel_plane->base);
3895         }
3896 }
3897 
3898 static void intel_disable_planes(struct drm_crtc *crtc)
3899 {
3900         struct drm_device *dev = crtc->dev;
3901         enum pipe pipe = to_intel_crtc(crtc)->pipe;
3902         struct drm_plane *plane;
3903         struct intel_plane *intel_plane;
3904 
3905         drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
3906                 intel_plane = to_intel_plane(plane);
3907                 if (intel_plane->pipe == pipe)
3908                         intel_plane_disable(&intel_plane->base);
3909         }
3910 }
3911 
3912 void hsw_enable_ips(struct intel_crtc *crtc)
3913 {
3914         struct drm_device *dev = crtc->base.dev;
3915         struct drm_i915_private *dev_priv = dev->dev_private;
3916 
3917         if (!crtc->config.ips_enabled)
3918                 return;
3919 
3920         /* We can only enable IPS after we enable a plane and wait for a vblank */
3921         intel_wait_for_vblank(dev, crtc->pipe);
3922 
3923         assert_plane_enabled(dev_priv, crtc->plane);
3924         if (IS_BROADWELL(dev)) {
3925                 mutex_lock(&dev_priv->rps.hw_lock);
3926                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
3927                 mutex_unlock(&dev_priv->rps.hw_lock);
3928                 /* Quoting Art Runyan: "it's not safe to expect any particular
3929                  * value in IPS_CTL bit 31 after enabling IPS through the
3930                  * mailbox." Moreover, the mailbox may return a bogus state,
3931                  * so we need to just enable it and continue on.
3932                  */
3933         } else {
3934                 I915_WRITE(IPS_CTL, IPS_ENABLE);
3935                 /* The bit only becomes 1 in the next vblank, so this wait here
3936                  * is essentially intel_wait_for_vblank. If we don't have this
3937                  * and don't wait for vblanks until the end of crtc_enable, then
3938                  * the HW state readout code will complain that the expected
3939                  * IPS_CTL value is not the one we read. */
3940                 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
3941                         DRM_ERROR("Timed out waiting for IPS enable\n");
3942         }
3943 }
3944 
3945 void hsw_disable_ips(struct intel_crtc *crtc)
3946 {
3947         struct drm_device *dev = crtc->base.dev;
3948         struct drm_i915_private *dev_priv = dev->dev_private;
3949 
3950         if (!crtc->config.ips_enabled)
3951                 return;
3952 
3953         assert_plane_enabled(dev_priv, crtc->plane);
3954         if (IS_BROADWELL(dev)) {
3955                 mutex_lock(&dev_priv->rps.hw_lock);
3956                 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
3957                 mutex_unlock(&dev_priv->rps.hw_lock);
3958                 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
3959                 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
3960                         DRM_ERROR("Timed out waiting for IPS disable\n");
3961         } else {
3962                 I915_WRITE(IPS_CTL, 0);
3963                 POSTING_READ(IPS_CTL);
3964         }
3965 
3966         /* We need to wait for a vblank before we can disable the plane. */
3967         intel_wait_for_vblank(dev, crtc->pipe);
3968 }
3969 
3970 /** Loads the palette/gamma unit for the CRTC with the prepared values */
3971 static void intel_crtc_load_lut(struct drm_crtc *crtc)
3972 {
3973         struct drm_device *dev = crtc->dev;
3974         struct drm_i915_private *dev_priv = dev->dev_private;
3975         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3976         enum pipe pipe = intel_crtc->pipe;
3977         int palreg = PALETTE(pipe);
3978         int i;
3979         bool reenable_ips = false;
3980 
3981         /* The clocks have to be on to load the palette. */
3982         if (!crtc->enabled || !intel_crtc->active)
3983                 return;
3984 
3985         if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3986                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3987                         assert_dsi_pll_enabled(dev_priv);
3988                 else
3989                         assert_pll_enabled(dev_priv, pipe);
3990         }
3991 
3992         /* use legacy palette for Ironlake */
3993         if (!HAS_GMCH_DISPLAY(dev))
3994                 palreg = LGC_PALETTE(pipe);
3995 
3996         /* Workaround: Do not read or write the pipe palette/gamma data while
3997          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3998          */
3999         if (IS_HASWELL(dev) && intel_crtc->config.ips_enabled &&
4000             ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4001              GAMMA_MODE_MODE_SPLIT)) {
4002                 hsw_disable_ips(intel_crtc);
4003                 reenable_ips = true;
4004         }
4005 
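             /* Each palette entry written below packs red in bits 23:16,
              * green in bits 15:8 and blue in bits 7:0. */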
4006         for (i = 0; i < 256; i++) {
4007                 I915_WRITE(palreg + 4 * i,
4008                            (intel_crtc->lut_r[i] << 16) |
4009                            (intel_crtc->lut_g[i] << 8) |
4010                            intel_crtc->lut_b[i]);
4011         }
4012 
4013         if (reenable_ips)
4014                 hsw_enable_ips(intel_crtc);
4015 }
4016 
4017 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
4018 {
4019         if (!enable && intel_crtc->overlay) {
4020                 struct drm_device *dev = intel_crtc->base.dev;
4021                 struct drm_i915_private *dev_priv = dev->dev_private;
4022 
4023                 mutex_lock(&dev->struct_mutex);
4024                 dev_priv->mm.interruptible = false;
4025                 (void) intel_overlay_switch_off(intel_crtc->overlay);
4026                 dev_priv->mm.interruptible = true;
4027                 mutex_unlock(&dev->struct_mutex);
4028         }
4029 
4030         /* Let userspace switch the overlay on again. In most cases userspace
4031          * has to recompute where to put it anyway.
4032          */
4033 }
4034 
4035 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
4036 {
4037         struct drm_device *dev = crtc->dev;
4038         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4039         int pipe = intel_crtc->pipe;
4040 
4041         assert_vblank_disabled(crtc);
4042 
4043         drm_vblank_on(dev, pipe);
4044 
4045         intel_enable_primary_hw_plane(crtc->primary, crtc);
4046         intel_enable_planes(crtc);
4047         intel_crtc_update_cursor(crtc, true);
4048         intel_crtc_dpms_overlay(intel_crtc, true);
4049 
4050         hsw_enable_ips(intel_crtc);
4051 
4052         mutex_lock(&dev->struct_mutex);
4053         intel_update_fbc(dev);
4054         mutex_unlock(&dev->struct_mutex);
4055 
4056         /*
4057          * FIXME: Once we grow proper nuclear flip support out of this we need
4058          * to compute the mask of flip planes precisely. For the time being
4059          * consider this a flip from a NULL plane.
4060          */
4061         intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4062 }
4063 
4064 static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4065 {
4066         struct drm_device *dev = crtc->dev;
4067         struct drm_i915_private *dev_priv = dev->dev_private;
4068         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4069         int pipe = intel_crtc->pipe;
4070         int plane = intel_crtc->plane;
4071 
4072         intel_crtc_wait_for_pending_flips(crtc);
4073 
4074         if (dev_priv->fbc.plane == plane)
4075                 intel_disable_fbc(dev);
4076 
4077         hsw_disable_ips(intel_crtc);
4078 
4079         intel_crtc_dpms_overlay(intel_crtc, false);
4080         intel_crtc_update_cursor(crtc, false);
4081         intel_disable_planes(crtc);
4082         intel_disable_primary_hw_plane(crtc->primary, crtc);
4083 
4084         /*
4085          * FIXME: Once we grow proper nuclear flip support out of this we need
4086          * to compute the mask of flip planes precisely. For the time being
4087          * consider this a flip to a NULL plane.
4088          */
4089         intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4090 
4091         drm_vblank_off(dev, pipe);
4092 
4093         assert_vblank_disabled(crtc);
4094 }
4095 
4096 static void ironlake_crtc_enable(struct drm_crtc *crtc)
4097 {
4098         struct drm_device *dev = crtc->dev;
4099         struct drm_i915_private *dev_priv = dev->dev_private;
4100         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4101         struct intel_encoder *encoder;
4102         int pipe = intel_crtc->pipe;
4103 
4104         WARN_ON(!crtc->enabled);
4105 
4106         if (intel_crtc->active)
4107                 return;
4108 
4109         if (intel_crtc->config.has_pch_encoder)
4110                 intel_prepare_shared_dpll(intel_crtc);
4111 
4112         if (intel_crtc->config.has_dp_encoder)
4113                 intel_dp_set_m_n(intel_crtc);
4114 
4115         intel_set_pipe_timings(intel_crtc);
4116 
4117         if (intel_crtc->config.has_pch_encoder) {
4118                 intel_cpu_transcoder_set_m_n(intel_crtc,
4119                                      &intel_crtc->config.fdi_m_n, NULL);
4120         }
4121 
4122         ironlake_set_pipeconf(crtc);
4123 
4124         intel_crtc->active = true;
4125 
4126         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4127         intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
4128 
4129         for_each_encoder_on_crtc(dev, crtc, encoder)
4130                 if (encoder->pre_enable)
4131                         encoder->pre_enable(encoder);
4132 
4133         if (intel_crtc->config.has_pch_encoder) {
4134                 /* Note: FDI PLL enabling _must_ be done before we enable the
4135                  * cpu pipes, hence this is separate from all the other fdi/pch
4136                  * enabling. */
4137                 ironlake_fdi_pll_enable(intel_crtc);
4138         } else {
4139                 assert_fdi_tx_disabled(dev_priv, pipe);
4140                 assert_fdi_rx_disabled(dev_priv, pipe);
4141         }
4142 
4143         ironlake_pfit_enable(intel_crtc);
4144 
4145         /*
4146          * On ILK+ LUT must be loaded before the pipe is running but with
4147          * clocks enabled
4148          */
4149         intel_crtc_load_lut(crtc);
4150 
4151         intel_update_watermarks(crtc);
4152         intel_enable_pipe(intel_crtc);
4153 
4154         if (intel_crtc->config.has_pch_encoder)
4155                 ironlake_pch_enable(crtc);
4156 
4157         for_each_encoder_on_crtc(dev, crtc, encoder)
4158                 encoder->enable(encoder);
4159 
4160         if (HAS_PCH_CPT(dev))
4161                 cpt_verify_modeset(dev, intel_crtc->pipe);
4162 
4163         intel_crtc_enable_planes(crtc);
4164 }
4165 
4166 /* IPS only exists on ULT machines and is tied to pipe A. */
4167 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4168 {
4169         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4170 }
4171 
4172 /*
4173  * This implements the workaround described in the "notes" section of the mode
4174  * set sequence documentation. When going from no pipes or single pipe to
4175  * multiple pipes, and planes are enabled after the pipe, we need to wait at
4176  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
4177  */
4178 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
4179 {
4180         struct drm_device *dev = crtc->base.dev;
4181         struct intel_crtc *crtc_it, *other_active_crtc = NULL;
4182 
4183         /* We want to get the other_active_crtc only if there's only 1 other
4184          * active crtc. */
4185         for_each_intel_crtc(dev, crtc_it) {
4186                 if (!crtc_it->active || crtc_it == crtc)
4187                         continue;
4188 
4189                 if (other_active_crtc)
4190                         return;
4191 
4192                 other_active_crtc = crtc_it;
4193         }
4194         if (!other_active_crtc)
4195                 return;
4196 
4197         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4198         intel_wait_for_vblank(dev, other_active_crtc->pipe);
4199 }
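     /*
      * Note: haswell_crtc_enable() below invokes this workaround right before
      * intel_crtc_enable_planes(), i.e. with the new pipe already running but
      * its planes still disabled.
      */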
4200 
4201 static void haswell_crtc_enable(struct drm_crtc *crtc)
4202 {
4203         struct drm_device *dev = crtc->dev;
4204         struct drm_i915_private *dev_priv = dev->dev_private;
4205         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4206         struct intel_encoder *encoder;
4207         int pipe = intel_crtc->pipe;
4208 
4209         WARN_ON(!crtc->enabled);
4210 
4211         if (intel_crtc->active)
4212                 return;
4213 
4214         if (intel_crtc_to_shared_dpll(intel_crtc))
4215                 intel_enable_shared_dpll(intel_crtc);
4216 
4217         if (intel_crtc->config.has_dp_encoder)
4218                 intel_dp_set_m_n(intel_crtc);
4219 
4220         intel_set_pipe_timings(intel_crtc);
4221 
4222         if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
4223                 I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
4224                            intel_crtc->config.pixel_multiplier - 1);
4225         }
4226 
4227         if (intel_crtc->config.has_pch_encoder) {
4228                 intel_cpu_transcoder_set_m_n(intel_crtc,
4229                                      &intel_crtc->config.fdi_m_n, NULL);
4230         }
4231 
4232         haswell_set_pipeconf(crtc);
4233 
4234         intel_set_pipe_csc(crtc);
4235 
4236         intel_crtc->active = true;
4237 
4238         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4239         for_each_encoder_on_crtc(dev, crtc, encoder)
4240                 if (encoder->pre_enable)
4241                         encoder->pre_enable(encoder);
4242 
4243         if (intel_crtc->config.has_pch_encoder) {
4244                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4245                 dev_priv->display.fdi_link_train(crtc);
4246         }
4247 
4248         intel_ddi_enable_pipe_clock(intel_crtc);
4249 
4250         ironlake_pfit_enable(intel_crtc);
4251 
4252         /*
4253          * On ILK+ LUT must be loaded before the pipe is running but with
4254          * clocks enabled
4255          */
4256         intel_crtc_load_lut(crtc);
4257 
4258         intel_ddi_set_pipe_settings(crtc);
4259         intel_ddi_enable_transcoder_func(crtc);
4260 
4261         intel_update_watermarks(crtc);
4262         intel_enable_pipe(intel_crtc);
4263 
4264         if (intel_crtc->config.has_pch_encoder)
4265                 lpt_pch_enable(crtc);
4266 
4267         if (intel_crtc->config.dp_encoder_is_mst)
4268                 intel_ddi_set_vc_payload_alloc(crtc, true);
4269 
4270         for_each_encoder_on_crtc(dev, crtc, encoder) {
4271                 encoder->enable(encoder);
4272                 intel_opregion_notify_encoder(encoder, true);
4273         }
4274 
4275         /* If we change the relative order between pipe/planes enabling, we need
4276          * to change the workaround. */
4277         haswell_mode_set_planes_workaround(intel_crtc);
4278         intel_crtc_enable_planes(crtc);
4279 }
4280 
4281 static void ironlake_pfit_disable(struct intel_crtc *crtc)
4282 {
4283         struct drm_device *dev = crtc->base.dev;
4284         struct drm_i915_private *dev_priv = dev->dev_private;
4285         int pipe = crtc->pipe;
4286 
4287         /* To avoid upsetting the power well on haswell, only disable the pfit if
4288          * it's in use. The hw state code will make sure we get this right. */
4289         if (crtc->config.pch_pfit.enabled) {
4290                 I915_WRITE(PF_CTL(pipe), 0);
4291                 I915_WRITE(PF_WIN_POS(pipe), 0);
4292                 I915_WRITE(PF_WIN_SZ(pipe), 0);
4293         }
4294 }
4295 
4296 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4297 {
4298         struct drm_device *dev = crtc->dev;
4299         struct drm_i915_private *dev_priv = dev->dev_private;
4300         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4301         struct intel_encoder *encoder;
4302         int pipe = intel_crtc->pipe;
4303         u32 reg, temp;
4304 
4305         if (!intel_crtc->active)
4306                 return;
4307 
4308         intel_crtc_disable_planes(crtc);
4309 
4310         for_each_encoder_on_crtc(dev, crtc, encoder)
4311                 encoder->disable(encoder);
4312 
4313         if (intel_crtc->config.has_pch_encoder)
4314                 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
4315 
4316         intel_disable_pipe(intel_crtc);
4317 
4318         ironlake_pfit_disable(intel_crtc);
4319 
4320         for_each_encoder_on_crtc(dev, crtc, encoder)
4321                 if (encoder->post_disable)
4322                         encoder->post_disable(encoder);
4323 
4324         if (intel_crtc->config.has_pch_encoder) {
4325                 ironlake_fdi_disable(crtc);
4326 
4327                 ironlake_disable_pch_transcoder(dev_priv, pipe);
4328 
4329                 if (HAS_PCH_CPT(dev)) {
4330                         /* disable TRANS_DP_CTL */
4331                         reg = TRANS_DP_CTL(pipe);
4332                         temp = I915_READ(reg);
4333                         temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4334                                   TRANS_DP_PORT_SEL_MASK);
4335                         temp |= TRANS_DP_PORT_SEL_NONE;
4336                         I915_WRITE(reg, temp);
4337 
4338                         /* disable DPLL_SEL */
4339                         temp = I915_READ(PCH_DPLL_SEL);
4340                         temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4341                         I915_WRITE(PCH_DPLL_SEL, temp);
4342                 }
4343 
4344                 /* disable PCH DPLL */
4345                 intel_disable_shared_dpll(intel_crtc);
4346 
4347                 ironlake_fdi_pll_disable(intel_crtc);
4348         }
4349 
4350         intel_crtc->active = false;
4351         intel_update_watermarks(crtc);
4352 
4353         mutex_lock(&dev->struct_mutex);
4354         intel_update_fbc(dev);
4355         mutex_unlock(&dev->struct_mutex);
4356 }
4357 
4358 static void haswell_crtc_disable(struct drm_crtc *crtc)
4359 {
4360         struct drm_device *dev = crtc->dev;
4361         struct drm_i915_private *dev_priv = dev->dev_private;
4362         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4363         struct intel_encoder *encoder;
4364         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
4365 
4366         if (!intel_crtc->active)
4367                 return;
4368 
4369         intel_crtc_disable_planes(crtc);
4370 
4371         for_each_encoder_on_crtc(dev, crtc, encoder) {
4372                 intel_opregion_notify_encoder(encoder, false);
4373                 encoder->disable(encoder);
4374         }
4375 
4376         if (intel_crtc->config.has_pch_encoder)
4377                 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
4378         intel_disable_pipe(intel_crtc);
4379 
4380         if (intel_crtc->config.dp_encoder_is_mst)
4381                 intel_ddi_set_vc_payload_alloc(crtc, false);
4382 
4383         intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4384 
4385         ironlake_pfit_disable(intel_crtc);
4386 
4387         intel_ddi_disable_pipe_clock(intel_crtc);
4388 
4389         if (intel_crtc->config.has_pch_encoder) {
4390                 lpt_disable_pch_transcoder(dev_priv);
4391                 intel_ddi_fdi_disable(crtc);
4392         }
4393 
4394         for_each_encoder_on_crtc(dev, crtc, encoder)
4395                 if (encoder->post_disable)
4396                         encoder->post_disable(encoder);
4397 
4398         intel_crtc->active = false;
4399         intel_update_watermarks(crtc);
4400 
4401         mutex_lock(&dev->struct_mutex);
4402         intel_update_fbc(dev);
4403         mutex_unlock(&dev->struct_mutex);
4404 
4405         if (intel_crtc_to_shared_dpll(intel_crtc))
4406                 intel_disable_shared_dpll(intel_crtc);
4407 }
4408 
4409 static void ironlake_crtc_off(struct drm_crtc *crtc)
4410 {
4411         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4412         intel_put_shared_dpll(intel_crtc);
4413 }
4414 
4415 
4416 static void i9xx_pfit_enable(struct intel_crtc *crtc)
4417 {
4418         struct drm_device *dev = crtc->base.dev;
4419         struct drm_i915_private *dev_priv = dev->dev_private;
4420         struct intel_crtc_config *pipe_config = &crtc->config;
4421 
4422         if (!crtc->config.gmch_pfit.control)
4423                 return;
4424 
4425         /*
4426          * The panel fitter should only be adjusted whilst the pipe is disabled,
4427          * according to register description and PRM.
4428          */
4429         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
4430         assert_pipe_disabled(dev_priv, crtc->pipe);
4431 
4432         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
4433         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
4434 
4435         /* Border color in case we don't scale up to the full screen. Black by
4436          * default, change to something else for debugging. */
4437         I915_WRITE(BCLRPAT(crtc->pipe), 0);
4438 }
4439 
4440 static enum intel_display_power_domain port_to_power_domain(enum port port)
4441 {
4442         switch (port) {
4443         case PORT_A:
4444                 return POWER_DOMAIN_PORT_DDI_A_4_LANES;
4445         case PORT_B:
4446                 return POWER_DOMAIN_PORT_DDI_B_4_LANES;
4447         case PORT_C:
4448                 return POWER_DOMAIN_PORT_DDI_C_4_LANES;
4449         case PORT_D:
4450                 return POWER_DOMAIN_PORT_DDI_D_4_LANES;
4451         default:
4452                 WARN_ON_ONCE(1);
4453                 return POWER_DOMAIN_PORT_OTHER;
4454         }
4455 }
4456 
4457 #define for_each_power_domain(domain, mask)                             \
4458         for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
4459                 if ((1 << (domain)) & (mask))
4460 
4461 enum intel_display_power_domain
4462 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
4463 {
4464         struct drm_device *dev = intel_encoder->base.dev;
4465         struct intel_digital_port *intel_dig_port;
4466 
4467         switch (intel_encoder->type) {
4468         case INTEL_OUTPUT_UNKNOWN:
4469                 /* Only DDI platforms should ever use this output type */
4470                 WARN_ON_ONCE(!HAS_DDI(dev));
4471         case INTEL_OUTPUT_DISPLAYPORT:
4472         case INTEL_OUTPUT_HDMI:
4473         case INTEL_OUTPUT_EDP:
4474                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
4475                 return port_to_power_domain(intel_dig_port->port);
4476         case INTEL_OUTPUT_DP_MST:
4477                 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
4478                 return port_to_power_domain(intel_dig_port->port);
4479         case INTEL_OUTPUT_ANALOG:
4480                 return POWER_DOMAIN_PORT_CRT;
4481         case INTEL_OUTPUT_DSI:
4482                 return POWER_DOMAIN_PORT_DSI;
4483         default:
4484                 return POWER_DOMAIN_PORT_OTHER;
4485         }
4486 }
4487 
4488 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4489 {
4490         struct drm_device *dev = crtc->dev;
4491         struct intel_encoder *intel_encoder;
4492         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4493         enum pipe pipe = intel_crtc->pipe;
4494         unsigned long mask;
4495         enum transcoder transcoder;
4496 
4497         transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
4498 
4499         mask = BIT(POWER_DOMAIN_PIPE(pipe));
4500         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
4501         if (intel_crtc->config.pch_pfit.enabled ||
4502             intel_crtc->config.pch_pfit.force_thru)
4503                 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
4504 
4505         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4506                 mask |= BIT(intel_display_port_power_domain(intel_encoder));
4507 
4508         return mask;
4509 }
4510 
4511 void intel_display_set_init_power(struct drm_i915_private *dev_priv,
4512                                   bool enable)
4513 {
4514         if (dev_priv->power_domains.init_power_on == enable)
4515                 return;
4516 
4517         if (enable)
4518                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
4519         else
4520                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4521 
4522         dev_priv->power_domains.init_power_on = enable;
4523 }
4524 
4525 static void modeset_update_crtc_power_domains(struct drm_device *dev)
4526 {
4527         struct drm_i915_private *dev_priv = dev->dev_private;
4528         unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
4529         struct intel_crtc *crtc;
4530 
4531         /*
4532          * First get all needed power domains, then put all unneeded, to avoid
4533          * any unnecessary toggling of the power wells.
4534          */
4535         for_each_intel_crtc(dev, crtc) {
4536                 enum intel_display_power_domain domain;
4537 
4538                 if (!crtc->base.enabled)
4539                         continue;
4540 
4541                 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
4542 
4543                 for_each_power_domain(domain, pipe_domains[crtc->pipe])
4544                         intel_display_power_get(dev_priv, domain);
4545         }
4546 
4547         for_each_intel_crtc(dev, crtc) {
4548                 enum intel_display_power_domain domain;
4549 
4550                 for_each_power_domain(domain, crtc->enabled_power_domains)
4551                         intel_display_power_put(dev_priv, domain);
4552 
4553                 crtc->enabled_power_domains = pipe_domains[crtc->pipe];
4554         }
4555 
4556         intel_display_set_init_power(dev_priv, false);
4557 }
4558 
4559 /* returns HPLL frequency in kHz */
4560 static int valleyview_get_vco(struct drm_i915_private *dev_priv)
4561 {
4562         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
4563 
4564         /* Obtain SKU information */
4565         mutex_lock(&dev_priv->dpio_lock);
4566         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
4567                 CCK_FUSE_HPLL_FREQ_MASK;
4568         mutex_unlock(&dev_priv->dpio_lock);
4569 
4570         return vco_freq[hpll_freq] * 1000;
4571 }
4572 
4573 static void vlv_update_cdclk(struct drm_device *dev)
4574 {
4575         struct drm_i915_private *dev_priv = dev->dev_private;
4576 
4577         dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
4578         DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
4579                          dev_priv->vlv_cdclk_freq);
4580 
4581         /*
4582          * Program the gmbus_freq based on the cdclk frequency.
4583          * BSpec erroneously claims we should aim for 4MHz, but
4584          * in fact 1MHz is the correct frequency.
4585          */
4586         I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
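             /* i.e. the cdclk expressed in MHz, rounded up (e.g. 320 for a
              * 320000 kHz cdclk). */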
4587 }
4588 
4589 /* Adjust CDclk dividers to allow high res or save power if possible */
4590 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4591 {
4592         struct drm_i915_private *dev_priv = dev->dev_private;
4593         u32 val, cmd;
4594 
4595         WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4596 
4597         if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
4598                 cmd = 2;
4599         else if (cdclk == 266667)
4600                 cmd = 1;
4601         else
4602                 cmd = 0;
4603 
4604         mutex_lock(&dev_priv->rps.hw_lock);
4605         val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4606         val &= ~DSPFREQGUAR_MASK;
4607         val |= (cmd << DSPFREQGUAR_SHIFT);
4608         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4609         if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4610                       DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
4611                      50)) {
4612                 DRM_ERROR("timed out waiting for CDclk change\n");
4613         }
4614         mutex_unlock(&dev_priv->rps.hw_lock);
4615 
4616         if (cdclk == 400000) {
4617                 u32 divider, vco;
4618 
4619                 vco = valleyview_get_vco(dev_priv);
4620                 divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
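                     /*
                      * For example, with an 800 MHz HPLL (vco == 800000 kHz) the
                      * 400 MHz target gives DIV_ROUND_CLOSEST(1600000, 400000) - 1 == 3,
                      * and the cdclk read back later is vco * 2 / (divider + 1) == 400000 kHz.
                      */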
4621 
4622                 mutex_lock(&dev_priv->dpio_lock);
4623                 /* adjust cdclk divider */
4624                 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
4625                 val &= ~DISPLAY_FREQUENCY_VALUES;
4626                 val |= divider;
4627                 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
4628 
4629                 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
4630                               DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
4631                              50))
4632                         DRM_ERROR("timed out waiting for CDclk change\n");
4633                 mutex_unlock(&dev_priv->dpio_lock);
4634         }
4635 
4636         mutex_lock(&dev_priv->dpio_lock);
4637         /* adjust self-refresh exit latency value */
4638         val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
4639         val &= ~0x7f;
4640 
4641         /*
4642          * For high bandwidth configs, we set a higher latency in the bunit
4643          * so that the core display fetch happens in time to avoid underruns.
4644          */
4645         if (cdclk == 400000)
4646                 val |= 4500 / 250; /* 4.5 usec */
4647         else
4648                 val |= 3000 / 250; /* 3.0 usec */
4649         vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
4650         mutex_unlock(&dev_priv->dpio_lock);
4651 
4652         vlv_update_cdclk(dev);
4653 }
4654 
4655 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
4656 {
4657         struct drm_i915_private *dev_priv = dev->dev_private;
4658         u32 val, cmd;
4659 
4660         WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
4661 
4662         switch (cdclk) {
4663         case 400000:
4664                 cmd = 3;
4665                 break;
4666         case 333333:
4667         case 320000:
4668                 cmd = 2;
4669                 break;
4670         case 266667:
4671                 cmd = 1;
4672                 break;
4673         case 200000:
4674                 cmd = 0;
4675                 break;
4676         default:
4677                 WARN_ON(1);
4678                 return;
4679         }
4680 
4681         mutex_lock(&dev_priv->rps.hw_lock);
4682         val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
4683         val &= ~DSPFREQGUAR_MASK_CHV;
4684         val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
4685         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
4686         if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
4687                       DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
4688                      50)) {
4689                 DRM_ERROR("timed out waiting for CDclk change\n");
4690         }
4691         mutex_unlock(&dev_priv->rps.hw_lock);
4692 
4693         vlv_update_cdclk(dev);
4694 }
4695 
4696 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4697                                  int max_pixclk)
4698 {
4699         int vco = valleyview_get_vco(dev_priv);
4700         int freq_320 = (vco <<  1) % 320000 != 0 ? 333333 : 320000;
4701 
4702         /* FIXME: Punit isn't quite ready yet */
4703         if (IS_CHERRYVIEW(dev_priv->dev))
4704                 return 400000;
4705 
4706         /*
4707          * Really only a few cases to deal with, as only 4 CDclks are supported:
4708          *   200MHz
4709          *   267MHz
4710          *   320/333MHz (depends on HPLL freq)
4711          *   400MHz
4712          * So we check to see whether we're above 90% of the lower bin and
4713          * adjust if needed.
4714          *
4715          * We seem to get an unstable or solid color picture at 200MHz.
4716          * Not sure what's wrong. For now use 200MHz only when all pipes
4717          * are off.
4718          */
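             /*
              * For instance, a 250000 kHz max pixel clock is above 90% of the
              * 266667 kHz bin (240000 kHz) but below 90% of freq_320, so the
              * checks below select the 320/333 MHz bin.
              */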
4719         if (max_pixclk > freq_320*9/10)
4720                 return 400000;
4721         else if (max_pixclk > 266667*9/10)
4722                 return freq_320;
4723         else if (max_pixclk > 0)
4724                 return 266667;
4725         else
4726                 return 200000;
4727 }
4728 
4729 /* compute the max pixel clock for new configuration */
4730 static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
4731 {
4732         struct drm_device *dev = dev_priv->dev;
4733         struct intel_crtc *intel_crtc;
4734         int max_pixclk = 0;
4735 
4736         for_each_intel_crtc(dev, intel_crtc) {
4737                 if (intel_crtc->new_enabled)
4738                         max_pixclk = max(max_pixclk,
4739                                          intel_crtc->new_config->adjusted_mode.crtc_clock);
4740         }
4741 
4742         return max_pixclk;
4743 }
4744 
4745 static void valleyview_modeset_global_pipes(struct drm_device *dev,
4746                                             unsigned *prepare_pipes)
4747 {
4748         struct drm_i915_private *dev_priv = dev->dev_private;
4749         struct intel_crtc *intel_crtc;
4750         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4751 
4752         if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
4753             dev_priv->vlv_cdclk_freq)
4754                 return;
4755 
4756         /* disable/enable all currently active pipes while we change cdclk */
4757         for_each_intel_crtc(dev, intel_crtc)
4758                 if (intel_crtc->base.enabled)
4759                         *prepare_pipes |= (1 << intel_crtc->pipe);
4760 }
4761 
4762 static void valleyview_modeset_global_resources(struct drm_device *dev)
4763 {
4764         struct drm_i915_private *dev_priv = dev->dev_private;
4765         int max_pixclk = intel_mode_max_pixclk(dev_priv);
4766         int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4767 
4768         if (req_cdclk != dev_priv->vlv_cdclk_freq) {
4769                 if (IS_CHERRYVIEW(dev))
4770                         cherryview_set_cdclk(dev, req_cdclk);
4771                 else
4772                         valleyview_set_cdclk(dev, req_cdclk);
4773         }
4774 
4775         modeset_update_crtc_power_domains(dev);
4776 }
4777 
4778 static void valleyview_crtc_enable(struct drm_crtc *crtc)
4779 {
4780         struct drm_device *dev = crtc->dev;
4781         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4782         struct intel_encoder *encoder;
4783         int pipe = intel_crtc->pipe;
4784         bool is_dsi;
4785 
4786         WARN_ON(!crtc->enabled);
4787 
4788         if (intel_crtc->active)
4789                 return;
4790 
4791         is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4792 
4793         if (!is_dsi) {
4794                 if (IS_CHERRYVIEW(dev))
4795                         chv_prepare_pll(intel_crtc);
4796                 else
4797                         vlv_prepare_pll(intel_crtc);
4798         }
4799 
4800         if (intel_crtc->config.has_dp_encoder)
4801                 intel_dp_set_m_n(intel_crtc);
4802 
4803         intel_set_pipe_timings(intel_crtc);
4804 
4805         i9xx_set_pipeconf(intel_crtc);
4806 
4807         intel_crtc->active = true;
4808 
4809         intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4810 
4811         for_each_encoder_on_crtc(dev, crtc, encoder)
4812                 if (encoder->pre_pll_enable)
4813                         encoder->pre_pll_enable(encoder);
4814 
4815         if (!is_dsi) {
4816                 if (IS_CHERRYVIEW(dev))
4817                         chv_enable_pll(intel_crtc);
4818                 else
4819                         vlv_enable_pll(intel_crtc);
4820         }
4821 
4822         for_each_encoder_on_crtc(dev, crtc, encoder)
4823                 if (encoder->pre_enable)
4824                         encoder->pre_enable(encoder);
4825 
4826         i9xx_pfit_enable(intel_crtc);
4827 
4828         intel_crtc_load_lut(crtc);
4829 
4830         intel_update_watermarks(crtc);
4831         intel_enable_pipe(intel_crtc);
4832 
4833         for_each_encoder_on_crtc(dev, crtc, encoder)
4834                 encoder->enable(encoder);
4835 
4836         intel_crtc_enable_planes(crtc);
4837 
4838         /* Underruns don't raise interrupts, so check manually. */
4839         i9xx_check_fifo_underruns(dev);
4840 }
4841 
4842 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4843 {
4844         struct drm_device *dev = crtc->base.dev;
4845         struct drm_i915_private *dev_priv = dev->dev_private;
4846 
4847         I915_WRITE(FP0(crtc->pipe), crtc->config.dpll_hw_state.fp0);
4848         I915_WRITE(FP1(crtc->pipe), crtc->config.dpll_hw_state.fp1);
4849 }
4850 
4851 static void i9xx_crtc_enable(struct drm_crtc *crtc)
4852 {
4853         struct drm_device *dev = crtc->dev;
4854         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4855         struct intel_encoder *encoder;
4856         int pipe = intel_crtc->pipe;
4857 
4858         WARN_ON(!crtc->enabled);
4859 
4860         if (intel_crtc->active)
4861                 return;
4862 
4863         i9xx_set_pll_dividers(intel_crtc);
4864 
4865         if (intel_crtc->config.has_dp_encoder)
4866                 intel_dp_set_m_n(intel_crtc);
4867 
4868         intel_set_pipe_timings(intel_crtc);
4869 
4870         i9xx_set_pipeconf(intel_crtc);
4871 
4872         intel_crtc->active = true;
4873 
4874         if (!IS_GEN2(dev))
4875                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4876 
4877         for_each_encoder_on_crtc(dev, crtc, encoder)
4878                 if (encoder->pre_enable)
4879                         encoder->pre_enable(encoder);
4880 
4881         i9xx_enable_pll(intel_crtc);
4882 
4883         i9xx_pfit_enable(intel_crtc);
4884 
4885         intel_crtc_load_lut(crtc);
4886 
4887         intel_update_watermarks(crtc);
4888         intel_enable_pipe(intel_crtc);
4889 
4890         for_each_encoder_on_crtc(dev, crtc, encoder)
4891                 encoder->enable(encoder);
4892 
4893         intel_crtc_enable_planes(crtc);
4894 
4895         /*
4896          * Gen2 reports pipe underruns whenever all planes are disabled.
4897          * So don't enable underrun reporting before at least some planes
4898          * are enabled.
4899          * FIXME: Need to fix the logic to work when we turn off all planes
4900          * but leave the pipe running.
4901          */
4902         if (IS_GEN2(dev))
4903                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
4904 
4905         /* Underruns don't raise interrupts, so check manually. */
4906         i9xx_check_fifo_underruns(dev);
4907 }
4908 
4909 static void i9xx_pfit_disable(struct intel_crtc *crtc)
4910 {
4911         struct drm_device *dev = crtc->base.dev;
4912         struct drm_i915_private *dev_priv = dev->dev_private;
4913 
4914         if (!crtc->config.gmch_pfit.control)
4915                 return;
4916 
4917         assert_pipe_disabled(dev_priv, crtc->pipe);
4918 
4919         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
4920                          I915_READ(PFIT_CONTROL));
4921         I915_WRITE(PFIT_CONTROL, 0);
4922 }
4923 
4924 static void i9xx_crtc_disable(struct drm_crtc *crtc)
4925 {
4926         struct drm_device *dev = crtc->dev;
4927         struct drm_i915_private *dev_priv = dev->dev_private;
4928         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4929         struct intel_encoder *encoder;
4930         int pipe = intel_crtc->pipe;
4931 
4932         if (!intel_crtc->active)
4933                 return;
4934 
4935         /*
4936          * Gen2 reports pipe underruns whenever all planes are disabled.
4937          * So disable underrun reporting before all the planes get disabled.
4938          * FIXME: Need to fix the logic to work when we turn off all planes
4939          * but leave the pipe running.
4940          */
4941         if (IS_GEN2(dev))
4942                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4943 
4944         /*
4945          * Vblank time updates from the shadow to live plane control register
4946          * are blocked if the memory self-refresh mode is active at that
4947          * moment. So to make sure the plane gets truly disabled, first
4948          * disable the self-refresh mode. The self-refresh enable bit in turn
4949          * will be checked/applied by the HW only at the next frame start
4950          * event which is after the vblank start event, so we need to have a
4951          * wait-for-vblank between disabling the plane and the pipe.
4952          */
4953         intel_set_memory_cxsr(dev_priv, false);
4954         intel_crtc_disable_planes(crtc);
4955 
4956         for_each_encoder_on_crtc(dev, crtc, encoder)
4957                 encoder->disable(encoder);
4958 
4959         /*
4960          * On gen2 planes are double buffered but the pipe isn't, so we must
4961          * wait for planes to fully turn off before disabling the pipe.
4962          * We also need to wait on all gmch platforms because of the
4963          * self-refresh mode constraint explained above.
4964          */
4965         intel_wait_for_vblank(dev, pipe);
4966 
4967         intel_disable_pipe(intel_crtc);
4968 
4969         i9xx_pfit_disable(intel_crtc);
4970 
4971         for_each_encoder_on_crtc(dev, crtc, encoder)
4972                 if (encoder->post_disable)
4973                         encoder->post_disable(encoder);
4974 
4975         if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
4976                 if (IS_CHERRYVIEW(dev))
4977                         chv_disable_pll(dev_priv, pipe);
4978                 else if (IS_VALLEYVIEW(dev))
4979                         vlv_disable_pll(dev_priv, pipe);
4980                 else
4981                         i9xx_disable_pll(intel_crtc);
4982         }
4983 
4984         if (!IS_GEN2(dev))
4985                 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
4986 
4987         intel_crtc->active = false;
4988         intel_update_watermarks(crtc);
4989 
4990         mutex_lock(&dev->struct_mutex);
4991         intel_update_fbc(dev);
4992         mutex_unlock(&dev->struct_mutex);
4993 }
4994 
4995 static void i9xx_crtc_off(struct drm_crtc *crtc)
4996 {
4997 }
4998 
4999 static void intel_crtc_update_sarea(struct drm_crtc *crtc,
5000                                     bool enabled)
5001 {
5002         struct drm_device *dev = crtc->dev;
5003         struct drm_i915_master_private *master_priv;
5004         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5005         int pipe = intel_crtc->pipe;
5006 
5007         if (!dev->primary->master)
5008                 return;
5009 
5010         master_priv = dev->primary->master->driver_priv;
5011         if (!master_priv->sarea_priv)
5012                 return;
5013 
5014         switch (pipe) {
5015         case 0:
5016                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
5017                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
5018                 break;
5019         case 1:
5020                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
5021                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
5022                 break;
5023         default:
5024                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
5025                 break;
5026         }
5027 }
5028 
5029 /* Master function to enable/disable CRTC and corresponding power wells */
5030 void intel_crtc_control(struct drm_crtc *crtc, bool enable)
5031 {
5032         struct drm_device *dev = crtc->dev;
5033         struct drm_i915_private *dev_priv = dev->dev_private;
5034         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5035         enum intel_display_power_domain domain;
5036         unsigned long domains;
5037 
5038         if (enable) {
5039                 if (!intel_crtc->active) {
5040                         domains = get_crtc_power_domains(crtc);
5041                         for_each_power_domain(domain, domains)
5042                                 intel_display_power_get(dev_priv, domain);
5043                         intel_crtc->enabled_power_domains = domains;
5044 
5045                         dev_priv->display.crtc_enable(crtc);
5046                 }
5047         } else {
5048                 if (intel_crtc->active) {
5049                         dev_priv->display.crtc_disable(crtc);
5050 
5051                         domains = intel_crtc->enabled_power_domains;
5052                         for_each_power_domain(domain, domains)
5053                                 intel_display_power_put(dev_priv, domain);
5054                         intel_crtc->enabled_power_domains = 0;
5055                 }
5056         }
5057 }
5058 
5059 /**
5060  * Sets the power management mode of the pipe and plane.
5061  */
5062 void intel_crtc_update_dpms(struct drm_crtc *crtc)
5063 {
5064         struct drm_device *dev = crtc->dev;
5065         struct intel_encoder *intel_encoder;
5066         bool enable = false;
5067 
5068         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5069                 enable |= intel_encoder->connectors_active;
5070 
5071         intel_crtc_control(crtc, enable);
5072 
5073         intel_crtc_update_sarea(crtc, enable);
5074 }
5075 
5076 static void intel_crtc_disable(struct drm_crtc *crtc)
5077 {
5078         struct drm_device *dev = crtc->dev;
5079         struct drm_connector *connector;
5080         struct drm_i915_private *dev_priv = dev->dev_private;
5081         struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
5082         enum pipe pipe = to_intel_crtc(crtc)->pipe;
5083 
5084         /* crtc should still be enabled when we disable it. */
5085         WARN_ON(!crtc->enabled);
5086 
5087         dev_priv->display.crtc_disable(crtc);
5088         intel_crtc_update_sarea(crtc, false);
5089         dev_priv->display.off(crtc);
5090 
5091         if (crtc->primary->fb) {
5092                 mutex_lock(&dev->struct_mutex);
5093                 intel_unpin_fb_obj(old_obj);
5094                 i915_gem_track_fb(old_obj, NULL,
5095                                   INTEL_FRONTBUFFER_PRIMARY(pipe));
5096                 mutex_unlock(&dev->struct_mutex);
5097                 crtc->primary->fb = NULL;
5098         }
5099 
5100         /* Update computed state. */
5101         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
5102                 if (!connector->encoder || !connector->encoder->crtc)
5103                         continue;
5104 
5105                 if (connector->encoder->crtc != crtc)
5106                         continue;
5107 
5108                 connector->dpms = DRM_MODE_DPMS_OFF;
5109                 to_intel_encoder(connector->encoder)->connectors_active = false;
5110         }
5111 }
5112 
5113 void intel_encoder_destroy(struct drm_encoder *encoder)
5114 {
5115         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5116 
5117         drm_encoder_cleanup(encoder);
5118         kfree(intel_encoder);
5119 }
5120 
5121 /* Simple dpms helper for encoders with just one connector, no cloning and only
5122  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
5123  * state of the entire output pipe. */
5124 static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
5125 {
5126         if (mode == DRM_MODE_DPMS_ON) {
5127                 encoder->connectors_active = true;
5128 
5129                 intel_crtc_update_dpms(encoder->base.crtc);
5130         } else {
5131                 encoder->connectors_active = false;
5132 
5133                 intel_crtc_update_dpms(encoder->base.crtc);
5134         }
5135 }
5136 
5137 /* Cross check the actual hw state with our own modeset state tracking (and its
5138  * internal consistency). */
5139 static void intel_connector_check_state(struct intel_connector *connector)
5140 {
5141         if (connector->get_hw_state(connector)) {
5142                 struct intel_encoder *encoder = connector->encoder;
5143                 struct drm_crtc *crtc;
5144                 bool encoder_enabled;
5145                 enum pipe pipe;
5146 
5147                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5148                               connector->base.base.id,
5149                               connector->base.name);
5150 
5151                 /* there is no real hw state for MST connectors */
5152                 if (connector->mst_port)
5153                         return;
5154 
5155                 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
5156                      "wrong connector dpms state\n");
5157                 WARN(connector->base.encoder != &encoder->base,
5158                      "active connector not linked to encoder\n");
5159 
5160                 if (encoder) {
5161                         WARN(!encoder->connectors_active,
5162                              "encoder->connectors_active not set\n");
5163 
5164                         encoder_enabled = encoder->get_hw_state(encoder, &pipe);
5165                         WARN(!encoder_enabled, "encoder not enabled\n");
5166                         if (WARN_ON(!encoder->base.crtc))
5167                                 return;
5168 
5169                         crtc = encoder->base.crtc;
5170 
5171                         WARN(!crtc->enabled, "crtc not enabled\n");
5172                         WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
5173                         WARN(pipe != to_intel_crtc(crtc)->pipe,
5174                              "encoder active on the wrong pipe\n");
5175                 }
5176         }
5177 }
5178 
5179 /* Even simpler default implementation, if there's really no special case to
5180  * consider. */
5181 void intel_connector_dpms(struct drm_connector *connector, int mode)
5182 {
5183         /* All the simple cases only support two dpms states. */
5184         if (mode != DRM_MODE_DPMS_ON)
5185                 mode = DRM_MODE_DPMS_OFF;
5186 
5187         if (mode == connector->dpms)
5188                 return;
5189 
5190         connector->dpms = mode;
5191 
5192         /* Only need to change hw state when actually enabled */
5193         if (connector->encoder)
5194                 intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
5195 
5196         intel_modeset_check_state(connector->dev);
5197 }
5198 
5199 /* Simple connector->get_hw_state implementation for encoders that support only
5200  * one connector and no cloning, so the encoder state determines the state
5201  * of the connector. */
5202 bool intel_connector_get_hw_state(struct intel_connector *connector)
5203 {
5204         enum pipe pipe = 0;
5205         struct intel_encoder *encoder = connector->encoder;
5206 
5207         return encoder->get_hw_state(encoder, &pipe);
5208 }
5209 
5210 static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5211                                      struct intel_crtc_config *pipe_config)
5212 {
5213         struct drm_i915_private *dev_priv = dev->dev_private;
5214         struct intel_crtc *pipe_B_crtc =
5215                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5216 
5217         DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
5218                       pipe_name(pipe), pipe_config->fdi_lanes);
5219         if (pipe_config->fdi_lanes > 4) {
5220                 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
5221                               pipe_name(pipe), pipe_config->fdi_lanes);
5222                 return false;
5223         }
5224 
5225         if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
5226                 if (pipe_config->fdi_lanes > 2) {
5227                         DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
5228                                       pipe_config->fdi_lanes);
5229                         return false;
5230                 } else {
5231                         return true;
5232                 }
5233         }
5234 
5235         if (INTEL_INFO(dev)->num_pipes == 2)
5236                 return true;
5237 
5238         /* Ivybridge 3 pipe is really complicated */
5239         switch (pipe) {
5240         case PIPE_A:
5241                 return true;
5242         case PIPE_B:
5243                 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5244                     pipe_config->fdi_lanes > 2) {
5245                         DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5246                                       pipe_name(pipe), pipe_config->fdi_lanes);
5247                         return false;
5248                 }
5249                 return true;
5250         case PIPE_C:
5251                 if (!pipe_has_enabled_pch(pipe_B_crtc) ||
5252                     pipe_B_crtc->config.fdi_lanes <= 2) {
5253                         if (pipe_config->fdi_lanes > 2) {
5254                                 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
5255                                               pipe_name(pipe), pipe_config->fdi_lanes);
5256                                 return false;
5257                         }
5258                 } else {
5259                         DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5260                         return false;
5261                 }
5262                 return true;
5263         default:
5264                 BUG();
5265         }
5266 }
5267 
5268 #define RETRY 1
5269 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5270                                        struct intel_crtc_config *pipe_config)
5271 {
5272         struct drm_device *dev = intel_crtc->base.dev;
5273         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5274         int lane, link_bw, fdi_dotclock;
5275         bool setup_ok, needs_recompute = false;
5276 
5277 retry:
5278         /* FDI is a binary signal running at ~2.7GHz, encoding
5279          * each output octet as 10 bits. The actual frequency
5280          * is stored as a divider into a 100MHz clock, and the
5281          * mode pixel clock is stored in units of 1KHz.
5282          * Hence the bw of each lane in terms of the mode signal
5283          * is:
5284          */
5285         link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
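             /*
              * E.g. a nominal 2.7 GHz FDI link makes intel_fdi_link_freq()
              * return 27, so link_bw == 27 * 10000 == 270000 kHz of mode-clock
              * bandwidth per lane.
              */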
5286 
5287         fdi_dotclock = adjusted_mode->crtc_clock;
5288 
5289         lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
5290                                            pipe_config->pipe_bpp);
5291 
5292         pipe_config->fdi_lanes = lane;
5293 
5294         intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
5295                                link_bw, &pipe_config->fdi_m_n);
5296 
5297         setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
5298                                             intel_crtc->pipe, pipe_config);
5299         if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
5300                 pipe_config->pipe_bpp -= 2*3;
5301                 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
5302                               pipe_config->pipe_bpp);
5303                 needs_recompute = true;
5304                 pipe_config->bw_constrained = true;
5305 
5306                 goto retry;
5307         }
5308 
5309         if (needs_recompute)
5310                 return RETRY;
5311 
5312         return setup_ok ? 0 : -EINVAL;
5313 }
5314 
5315 static void hsw_compute_ips_config(struct intel_crtc *crtc,
5316                                    struct intel_crtc_config *pipe_config)
5317 {
5318         pipe_config->ips_enabled = i915.enable_ips &&
5319                                    hsw_crtc_supports_ips(crtc) &&
5320                                    pipe_config->pipe_bpp <= 24;
5321 }
5322 
5323 static int intel_crtc_compute_config(struct intel_crtc *crtc,
5324                                      struct intel_crtc_config *pipe_config)
5325 {
5326         struct drm_device *dev = crtc->base.dev;
5327         struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5328 
5329         /* FIXME should check pixel clock limits on all platforms */
5330         if (INTEL_INFO(dev)->gen < 4) {
5331                 struct drm_i915_private *dev_priv = dev->dev_private;
5332                 int clock_limit =
5333                         dev_priv->display.get_display_clock_speed(dev);
5334 
5335                 /*
5336                  * Enable pixel doubling when the dot clock
5337                  * is > 90% of the (display) core speed.
5338                  *
5339                  * GDG double wide on either pipe,
5340                  * otherwise pipe A only.
5341                  */
5342                 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
5343                     adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
5344                         clock_limit *= 2;
5345                         pipe_config->double_wide = true;
5346                 }
5347 
5348                 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
5349                         return -EINVAL;
5350         }
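             /*
              * Illustration of the check above: with a 200000 kHz core clock a
              * 190000 kHz dot clock exceeds the 90% limit (180000 kHz), so on
              * pipe A (or either pipe on 915G) the pipe goes double wide and
              * the limit doubles to 400000 kHz.
              */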
5351 
5352         /*
5353          * Pipe horizontal size must be even in:
5354          * - DVO ganged mode
5355          * - LVDS dual channel mode
5356          * - Double wide pipe
5357          */
5358         if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5359              intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5360                 pipe_config->pipe_src_w &= ~1;
5361 
5362         /* Cantiga+ cannot handle modes with a hsync front porch of 0.
5363          * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
5364          */
5365         if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
5366                 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
5367                 return -EINVAL;
5368 
5369         if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5370                 pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
5371         } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5372                 /* only an 8bpc pipe, with 6bpc dither through the panel fitter
5373                  * for lvds. */
5374                 pipe_config->pipe_bpp = 8*3;
5375         }
5376 
5377         if (HAS_IPS(dev))
5378                 hsw_compute_ips_config(crtc, pipe_config);
5379 
5380         /*
5381          * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
5382          * old clock survives for now.
5383          */
5384         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
5385                 pipe_config->shared_dpll = crtc->config.shared_dpll;
5386 
5387         if (pipe_config->has_pch_encoder)
5388                 return ironlake_fdi_compute_config(crtc, pipe_config);
5389 
5390         return 0;
5391 }
5392 
5393 static int valleyview_get_display_clock_speed(struct drm_device *dev)
5394 {
5395         struct drm_i915_private *dev_priv = dev->dev_private;
5396         int vco = valleyview_get_vco(dev_priv);
5397         u32 val;
5398         int divider;
5399 
5400         /* FIXME: Punit isn't quite ready yet */
5401         if (IS_CHERRYVIEW(dev))
5402                 return 400000;
5403 
5404         mutex_lock(&dev_priv->dpio_lock);
5405         val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5406         mutex_unlock(&dev_priv->dpio_lock);
5407 
5408         divider = val & DISPLAY_FREQUENCY_VALUES;
5409 
5410         WARN((val & DISPLAY_FREQUENCY_STATUS) !=
5411              (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5412              "cdclk change in progress\n");
5413 
5414         return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
5415 }
5416 
5417 static int i945_get_display_clock_speed(struct drm_device *dev)
5418 {
5419         return 400000;
5420 }
5421 
5422 static int i915_get_display_clock_speed(struct drm_device *dev)
5423 {
5424         return 333000;
5425 }
5426 
5427 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
5428 {
5429         return 200000;
5430 }
5431 
5432 static int pnv_get_display_clock_speed(struct drm_device *dev)
5433 {
5434         u16 gcfgc = 0;
5435 
5436         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5437 
5438         switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5439         case GC_DISPLAY_CLOCK_267_MHZ_PNV:
5440                 return 267000;
5441         case GC_DISPLAY_CLOCK_333_MHZ_PNV:
5442                 return 333000;
5443         case GC_DISPLAY_CLOCK_444_MHZ_PNV:
5444                 return 444000;
5445         case GC_DISPLAY_CLOCK_200_MHZ_PNV:
5446                 return 200000;
5447         default:
5448                 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
5449         case GC_DISPLAY_CLOCK_133_MHZ_PNV:
5450                 return 133000;
5451         case GC_DISPLAY_CLOCK_167_MHZ_PNV:
5452                 return 167000;
5453         }
5454 }
5455 
5456 static int i915gm_get_display_clock_speed(struct drm_device *dev)
5457 {
5458         u16 gcfgc = 0;
5459 
5460         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
5461 
5462         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
5463                 return 133000;
5464         else {
5465                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
5466                 case GC_DISPLAY_CLOCK_333_MHZ:
5467                         return 333000;
5468                 default:
5469                 case GC_DISPLAY_CLOCK_190_200_MHZ:
5470                         return 190000;
5471                 }
5472         }
5473 }
5474 
5475 static int i865_get_display_clock_speed(struct drm_device *dev)
5476 {
5477         return 266000;
5478 }
5479 
5480 static int i855_get_display_clock_speed(struct drm_device *dev)
5481 {
5482         u16 hpllcc = 0;
5483         /* Assume that the hardware is in the high speed state.  This
5484          * should be the default.
5485          */
5486         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
5487         case GC_CLOCK_133_200:
5488         case GC_CLOCK_100_200:
5489                 return 200000;
5490         case GC_CLOCK_166_250:
5491                 return 250000;
5492         case GC_CLOCK_100_133:
5493                 return 133000;
5494         }
5495 
5496         /* Shouldn't happen */
5497         return 0;
5498 }
5499 
5500 static int i830_get_display_clock_speed(struct drm_device *dev)
5501 {
5502         return 133000;
5503 }
5504 
5505 static void
5506 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
5507 {
5508         while (*num > DATA_LINK_M_N_MASK ||
5509                *den > DATA_LINK_M_N_MASK) {
5510                 *num >>= 1;
5511                 *den >>= 1;
5512         }
5513 }
5514 
5515 static void compute_m_n(unsigned int m, unsigned int n,
5516                         uint32_t *ret_m, uint32_t *ret_n)
5517 {
5518         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
5519         *ret_m = div_u64((uint64_t) m * *ret_n, n);
5520         intel_reduce_m_n_ratio(ret_m, ret_n);
5521 }
5522 
5523 void
5524 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
5525                        int pixel_clock, int link_clock,
5526                        struct intel_link_m_n *m_n)
5527 {
5528         m_n->tu = 64;
5529 
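             /*
              * E.g. 24 bpp at a 148500 kHz pixel clock over 4 lanes of a
              * 270000 kHz link gives a data M/N ratio of roughly
              * (24 * 148500) / (270000 * 4 * 8) == 0.4125.
              */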
5530         compute_m_n(bits_per_pixel * pixel_clock,
5531                     link_clock * nlanes * 8,
5532                     &m_n->gmch_m, &m_n->gmch_n);
5533 
5534         compute_m_n(pixel_clock, link_clock,
5535                     &m_n->link_m, &m_n->link_n);
5536 }
5537 
5538 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5539 {
5540         if (i915.panel_use_ssc >= 0)
5541                 return i915.panel_use_ssc != 0;
5542         return dev_priv->vbt.lvds_use_ssc
5543                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5544 }
5545 
5546 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5547 {
5548         struct drm_device *dev = crtc->dev;
5549         struct drm_i915_private *dev_priv = dev->dev_private;
5550         int refclk;
5551 
5552         if (IS_VALLEYVIEW(dev)) {
5553                 refclk = 100000;
5554         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5555             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5556                 refclk = dev_priv->vbt.lvds_ssc_freq;
5557                 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5558         } else if (!IS_GEN2(dev)) {
5559                 refclk = 96000;
5560         } else {
5561                 refclk = 48000;
5562         }
5563 
5564         return refclk;
5565 }
5566 
5567 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
5568 {
5569         return (1 << dpll->n) << 16 | dpll->m2;
5570 }
5571 
5572 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
5573 {
5574         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
5575 }
5576 
5577 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5578                                      intel_clock_t *reduced_clock)
5579 {
5580         struct drm_device *dev = crtc->base.dev;
5581         u32 fp, fp2 = 0;
5582 
5583         if (IS_PINEVIEW(dev)) {
5584                 fp = pnv_dpll_compute_fp(&crtc->config.dpll);
5585                 if (reduced_clock)
5586                         fp2 = pnv_dpll_compute_fp(reduced_clock);
5587         } else {
5588                 fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
5589                 if (reduced_clock)
5590                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
5591         }
5592 
5593         crtc->config.dpll_hw_state.fp0 = fp;
5594 
5595         crtc->lowfreq_avail = false;
5596         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5597             reduced_clock && i915.powersave) {
5598                 crtc->config.dpll_hw_state.fp1 = fp2;
5599                 crtc->lowfreq_avail = true;
5600         } else {
5601                 crtc->config.dpll_hw_state.fp1 = fp;
5602         }
5603 }
5604 
5605 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
5606                 pipe)
5607 {
5608         u32 reg_val;
5609 
5610         /*
5611          * PLLB opamp always calibrates to max value of 0x3f, force enable it
5612          * and set it to a reasonable value instead.
5613          */
5614         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5615         reg_val &= 0xffffff00;
5616         reg_val |= 0x00000030;
5617         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5618 
5619         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5620         reg_val &= 0x8cffffff;
5621         reg_val = 0x8c000000;
5622         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5623 
5624         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
5625         reg_val &= 0xffffff00;
5626         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
5627 
5628         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
5629         reg_val &= 0x00ffffff;
5630         reg_val |= 0xb0000000;
5631         vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
5632 }
5633 
5634 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
5635                                          struct intel_link_m_n *m_n)
5636 {
5637         struct drm_device *dev = crtc->base.dev;
5638         struct drm_i915_private *dev_priv = dev->dev_private;
5639         int pipe = crtc->pipe;
5640 
5641         I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5642         I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
5643         I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
5644         I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
5645 }
5646 
5647 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
5648                                          struct intel_link_m_n *m_n,
5649                                          struct intel_link_m_n *m2_n2)
5650 {
5651         struct drm_device *dev = crtc->base.dev;
5652         struct drm_i915_private *dev_priv = dev->dev_private;
5653         int pipe = crtc->pipe;
5654         enum transcoder transcoder = crtc->config.cpu_transcoder;
5655 
5656         if (INTEL_INFO(dev)->gen >= 5) {
5657                 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
5658                 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
5659                 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
5660                 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
5661                 /* The M2_N2 registers are only programmed for gen < 8 (they are
5662                  * not available on gen8+) and only when DRRS is supported, so
5663                  * that the registers are not accessed unnecessarily.
5664                  */
5665                 if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
5666                         crtc->config.has_drrs) {
5667                         I915_WRITE(PIPE_DATA_M2(transcoder),
5668                                         TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
5669                         I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
5670                         I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
5671                         I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
5672                 }
5673         } else {
5674                 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
5675                 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5676                 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
5677                 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
5678         }
5679 }
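
/*
 * Background note (DisplayPort link programming, not specific to this
 * function): the data M/N pair expresses the ratio of pixel payload
 * bandwidth to total link bandwidth and the link M/N pair the ratio of
 * pixel clock to link symbol clock.  The second M2/N2 set written above
 * holds an alternate ratio used by DRRS to switch the panel to a lower
 * refresh rate.
 */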
5680 
5681 void intel_dp_set_m_n(struct intel_crtc *crtc)
5682 {
5683         if (crtc->config.has_pch_encoder)
5684                 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
5685         else
5686                 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
5687                                                    &crtc->config.dp_m2_n2);
5688 }
5689 
5690 static void vlv_update_pll(struct intel_crtc *crtc)
5691 {
5692         u32 dpll, dpll_md;
5693 
5694         /*
5695          * Enable DPIO clock input. We should never disable the reference
5696          * clock for pipe B, since VGA hotplug / manual detection depends
5697          * on it.
5698          */
5699         dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
5700                 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
5701         /* We should never disable this, set it here for state tracking */
5702         if (crtc->pipe == PIPE_B)
5703                 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5704         dpll |= DPLL_VCO_ENABLE;
5705         crtc->config.dpll_hw_state.dpll = dpll;
5706 
5707         dpll_md = (crtc->config.pixel_multiplier - 1)
5708                 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5709         crtc->config.dpll_hw_state.dpll_md = dpll_md;
5710 }
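
/*
 * Note the split above/below: vlv_update_pll() only computes and caches the
 * DPLL control and MD words in dpll_hw_state, while vlv_prepare_pll() does
 * the actual DPIO sideband programming of the divider and tuning registers.
 */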
5711 
5712 static void vlv_prepare_pll(struct intel_crtc *crtc)
5713 {
5714         struct drm_device *dev = crtc->base.dev;
5715         struct drm_i915_private *dev_priv = dev->dev_private;
5716         int pipe = crtc->pipe;
5717         u32 mdiv;
5718         u32 bestn, bestm1, bestm2, bestp1, bestp2;
5719         u32 coreclk, reg_val;
5720 
5721         mutex_lock(&dev_priv->dpio_lock);
5722 
5723         bestn = crtc->config.dpll.n;
5724         bestm1 = crtc->config.dpll.m1;
5725         bestm2 = crtc->config.dpll.m2;
5726         bestp1 = crtc->config.dpll.p1;
5727         bestp2 = crtc->config.dpll.p2;
5728 
5729         /* See eDP HDMI DPIO driver vbios notes doc */
5730 
5731         /* PLL B needs special handling */
5732         if (pipe == PIPE_B)
5733                 vlv_pllb_recal_opamp(dev_priv, pipe);
5734 
5735         /* Set up Tx target for periodic Rcomp update */
5736         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
5737 
5738         /* Disable target IRef on PLL */
5739         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
5740         reg_val &= 0x00ffffff;
5741         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
5742 
5743         /* Disable fast lock */
5744         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
5745 
5746         /* Set idtafcrecal before PLL is enabled */
5747         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
5748         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
5749         mdiv |= ((bestn << DPIO_N_SHIFT));
5750         mdiv |= (1 << DPIO_K_SHIFT);
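        /*
         * At this point mdiv holds all dividers for this PLL in one DPIO
         * word: M1 and N at their shifts, M2 in the low bits, the P1/P2
         * post dividers and a fixed K divider of 1.  The post divider mode
         * is added below and the word is written twice, the second time
         * with calibration enabled.
         */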
5751 
5752         /*
5753          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
5754          * but we don't support that).
5755          * Note: don't use the DAC post divider as it seems unstable.
5756          */
5757         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
5758         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5759 
5760         mdiv |= DPIO_ENABLE_CALIBRATION;
5761         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5762 
5763         /* Set HBR and RBR LPF coefficients */
5764         if (crtc->config.port_clock == 162000 ||
5765             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
5766             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
5767                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5768                                  0x009f0003);
5769         else
5770                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5771                                  0x00d0000f);
5772 
5773         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
5774             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5775                 /* Use SSC source */
5776                 if (pipe == PIPE_A)
5777                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5778                                          0x0df40000);
5779                 else
5780                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5781                                          0x0df70000);
5782         } else { /* HDMI or VGA */
5783                 /* Use bend source */
5784                 if (pipe == PIPE_A)
5785                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5786                                          0x0df70000);
5787                 else
5788                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5789                                          0x0df40000);
5790         }
5791 
5792         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5793         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5794         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
5795             intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
5796                 coreclk |= 0x01000000;
5797         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5798 
5799         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
5800         mutex_unlock(&dev_priv->dpio_lock);
5801 }
5802 
5803 static void chv_update_pll(struct intel_crtc *crtc)
5804 {
5805         crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5806                 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5807                 DPLL_VCO_ENABLE;
5808         if (crtc->pipe != PIPE_A)
5809                 crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5810 
5811         crtc->config.dpll_hw_state.dpll_md =
5812                 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5813 }
5814 
5815 static void chv_prepare_pll(struct intel_crtc *crtc)
5816 {
5817         struct drm_device *dev = crtc->base.dev;
5818         struct drm_i915_private *dev_priv = dev->dev_private;
5819         int pipe = crtc->pipe;
5820         int dpll_reg = DPLL(crtc->pipe);
5821         enum dpio_channel port = vlv_pipe_to_channel(pipe);
5822         u32 loopfilter, intcoeff;
5823         u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5824         int refclk;
5825 
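        /*
         * CHV uses a fractional M2 feedback divider: the low 22 bits of the
         * computed m2 value are the fractional part and the upper bits the
         * integer part, so m2 is split here and written to separate DPIO
         * registers further down.
         */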
5826         bestn = crtc->config.dpll.n;
5827         bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
5828         bestm1 = crtc->config.dpll.m1;
5829         bestm2 = crtc->config.dpll.m2 >> 22;
5830         bestp1 = crtc->config.dpll.p1;
5831         bestp2 = crtc->config.dpll.p2;
5832 
5833         /*
5834          * Enable Refclk and SSC
5835          */
5836         I915_WRITE(dpll_reg,
5837                    crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
5838 
5839         mutex_lock(&dev_priv->dpio_lock);
5840 
5841         /* p1 and p2 divider */
5842         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
5843                         5 << DPIO_CHV_S1_DIV_SHIFT |
5844                         bestp1 << DPIO_CHV_P1_DIV_SHIFT |
5845                         bestp2 << DPIO_CHV_P2_DIV_SHIFT |
5846                         1 << DPIO_CHV_K_DIV_SHIFT);
5847 
5848         /* Feedback post-divider - m2 */
5849         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
5850 
5851         /* Feedback refclk divider - n and m1 */
5852         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
5853                         DPIO_CHV_M1_DIV_BY_2 |
5854                         1 << DPIO_CHV_N_DIV_SHIFT);
5855 
5856         /* M2 fraction division */
5857         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
5858 
5859         /* M2 fraction division enable */
5860         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
5861                        DPIO_CHV_FRAC_DIV_EN |
5862                        (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5863 
5864         /* Loop filter */
5865         refclk = i9xx_get_refclk(&crtc->base, 0);
5866         loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5867                 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5868         if (refclk == 100000)
5869                 intcoeff = 11;
5870         else if (refclk == 38400)
5871                 intcoeff = 10;
5872         else
5873                 intcoeff = 9;
5874         loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
5875         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
5876 
5877         /* AFC Recal */
5878         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
5879                         vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
5880                         DPIO_AFC_RECAL);
5881 
5882         mutex_unlock(&dev_priv->dpio_lock);
5883 }
5884 
5885 static void i9xx_update_pll(struct intel_crtc *crtc,
5886                             intel_clock_t *reduced_clock,
5887                             int num_connectors)
5888 {
5889         struct drm_device *dev = crtc->base.dev;
5890         struct drm_i915_private *dev_priv = dev->dev_private;
5891         u32 dpll;
5892         bool is_sdvo;
5893         struct dpll *clock = &crtc->config.dpll;
5894 
5895         i9xx_update_pll_dividers(crtc, reduced_clock);
5896 
5897         is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
5898                 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
5899 
5900         dpll = DPLL_VGA_MODE_DIS;
5901 
5902         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
5903                 dpll |= DPLLB_MODE_LVDS;
5904         else
5905                 dpll |= DPLLB_MODE_DAC_SERIAL;
5906 
5907         if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5908                 dpll |= (crtc->config.pixel_multiplier - 1)
5909                         << SDVO_MULTIPLIER_SHIFT_HIRES;
5910         }
5911 
5912         if (is_sdvo)
5913                 dpll |= DPLL_SDVO_HIGH_SPEED;
5914 
5915         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
5916                 dpll |= DPLL_SDVO_HIGH_SPEED;
5917 
5918         /* compute bitmask from p1 value */
5919         if (IS_PINEVIEW(dev))
5920                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5921         else {
5922                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5923                 if (IS_G4X(dev) && reduced_clock)
5924                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5925         }
5926         switch (clock->p2) {
5927         case 5:
5928                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5929                 break;
5930         case 7:
5931                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5932                 break;
5933         case 10:
5934                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5935                 break;
5936         case 14:
5937                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5938                 break;
5939         }
5940         if (INTEL_INFO(dev)->gen >= 4)
5941                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5942 
5943         if (crtc->config.sdvo_tv_clock)
5944                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5945         else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5946                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5947                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5948         else
5949                 dpll |= PLL_REF_INPUT_DREFCLK;
5950 
5951         dpll |= DPLL_VCO_ENABLE;
5952         crtc->config.dpll_hw_state.dpll = dpll;
5953 
5954         if (INTEL_INFO(dev)->gen >= 4) {
5955                 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
5956                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5957                 crtc->config.dpll_hw_state.dpll_md = dpll_md;
5958         }
5959 }
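
/*
 * As with the VLV/CHV paths, the DPLL and DPLL_MD words computed above are
 * only cached in dpll_hw_state here; the actual register writes happen later
 * when the pipe's PLL is enabled.
 */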
5960 
5961 static void i8xx_update_pll(struct intel_crtc *crtc,
5962                             intel_clock_t *reduced_clock,
5963                             int num_connectors)
5964 {
5965         struct drm_device *dev = crtc->base.dev;
5966         struct drm_i915_private *dev_priv = dev->dev_private;
5967         u32 dpll;
5968         struct dpll *clock = &crtc->config.dpll;
5969 
5970         i9xx_update_pll_dividers(crtc, reduced_clock);
5971 
5972         dpll = DPLL_VGA_MODE_DIS;
5973 
5974         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
5975                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5976         } else {
5977                 if (clock->p1 == 2)
5978                         dpll |= PLL_P1_DIVIDE_BY_TWO;
5979                 else
5980                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5981                 if (clock->p2 == 4)
5982                         dpll |= PLL_P2_DIVIDE_BY_4;
5983         }
5984 
5985         if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
5986                 dpll |= DPLL_DVO_2X_MODE;
5987 
5988         if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
5989                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5990                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5991         else
5992                 dpll |= PLL_REF_INPUT_DREFCLK;
5993 
5994         dpll |= DPLL_VCO_ENABLE;
5995         crtc->config.dpll_hw_state.dpll = dpll;
5996 }
5997 
5998 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5999 {
6000         struct drm_device *dev = intel_crtc->base.dev;
6001         struct drm_i915_private *dev_priv = dev->dev_private;
6002         enum pipe pipe = intel_crtc->pipe;
6003         enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
6004         struct drm_display_mode *adjusted_mode =
6005                 &intel_crtc->config.adjusted_mode;
6006         uint32_t crtc_vtotal, crtc_vblank_end;
6007         int vsyncshift = 0;
6008 
6009         /* We need to be careful not to change the adjusted mode, for otherwise
6010          * the hw state checker will get angry at the mismatch. */
6011         crtc_vtotal = adjusted_mode->crtc_vtotal;
6012         crtc_vblank_end = adjusted_mode->crtc_vblank_end;
6013 
6014         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6015                 /* the chip adds 2 halflines automatically */
6016                 crtc_vtotal -= 1;
6017                 crtc_vblank_end -= 1;
6018 
6019                 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
6020                         vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
6021                 else
6022                         vsyncshift = adjusted_mode->crtc_hsync_start -
6023                                 adjusted_mode->crtc_htotal / 2;
6024                 if (vsyncshift < 0)
6025                         vsyncshift += adjusted_mode->crtc_htotal;
6026         }
6027 
6028         if (INTEL_INFO(dev)->gen > 3)
6029                 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
6030 
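        /*
         * Each timing register below packs two values: the low 16 bits hold
         * the active/start count minus one and the high 16 bits the
         * total/end count minus one.  The readout in intel_get_pipe_timings()
         * adds the one back when decoding these registers.
         */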
6031         I915_WRITE(HTOTAL(cpu_transcoder),
6032                    (adjusted_mode->crtc_hdisplay - 1) |
6033                    ((adjusted_mode->crtc_htotal - 1) << 16));
6034         I915_WRITE(HBLANK(cpu_transcoder),
6035                    (adjusted_mode->crtc_hblank_start - 1) |
6036                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
6037         I915_WRITE(HSYNC(cpu_transcoder),
6038                    (adjusted_mode->crtc_hsync_start - 1) |
6039                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
6040 
6041         I915_WRITE(VTOTAL(cpu_transcoder),
6042                    (adjusted_mode->crtc_vdisplay - 1) |
6043                    ((crtc_vtotal - 1) << 16));
6044         I915_WRITE(VBLANK(cpu_transcoder),
6045                    (adjusted_mode->crtc_vblank_start - 1) |
6046                    ((crtc_vblank_end - 1) << 16));
6047         I915_WRITE(VSYNC(cpu_transcoder),
6048                    (adjusted_mode->crtc_vsync_start - 1) |
6049                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
6050 
6051         /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
6052          * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
6053          * documented on the DDI_FUNC_CTL register description, EDP Input Select
6054          * bits. */
6055         if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
6056             (pipe == PIPE_B || pipe == PIPE_C))
6057                 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
6058 
6059         /* pipesrc controls the size that is scaled from, which should
6060          * always be the user's requested size.
6061          */
6062         I915_WRITE(PIPESRC(pipe),
6063                    ((intel_crtc->config.pipe_src_w - 1) << 16) |
6064                    (intel_crtc->config.pipe_src_h - 1));
6065 }
6066 
6067 static void intel_get_pipe_timings(struct intel_crtc *crtc,
6068                                    struct intel_crtc_config *pipe_config)
6069 {
6070         struct drm_device *dev = crtc->base.dev;
6071         struct drm_i915_private *dev_priv = dev->dev_private;
6072         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
6073         uint32_t tmp;
6074 
6075         tmp = I915_READ(HTOTAL(cpu_transcoder));
6076         pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
6077         pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
6078         tmp = I915_READ(HBLANK(cpu_transcoder));
6079         pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
6080         pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
6081         tmp = I915_READ(HSYNC(cpu_transcoder));
6082         pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
6083         pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
6084 
6085         tmp = I915_READ(VTOTAL(cpu_transcoder));
6086         pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
6087         pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
6088         tmp = I915_READ(VBLANK(cpu_transcoder));
6089         pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
6090         pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
6091         tmp = I915_READ(VSYNC(cpu_transcoder));
6092         pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
6093         pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
6094 
6095         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
6096                 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
6097                 pipe_config->adjusted_mode.crtc_vtotal += 1;
6098                 pipe_config->adjusted_mode.crtc_vblank_end += 1;
6099         }
6100 
6101         tmp = I915_READ(PIPESRC(crtc->pipe));
6102         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
6103         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
6104 
6105         pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
6106         pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
6107 }
6108 
6109 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
6110                                  struct intel_crtc_config *pipe_config)
6111 {
6112         mode->hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
6113         mode->htotal = pipe_config->adjusted_mode.crtc_htotal;
6114         mode->hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
6115         mode->hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
6116 
6117         mode->vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
6118         mode->vtotal = pipe_config->adjusted_mode.crtc_vtotal;
6119         mode->vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
6120         mode->vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
6121 
6122         mode->flags = pipe_config->adjusted_mode.flags;
6123 
6124         mode->clock = pipe_config->adjusted_mode.crtc_clock;
6125         mode->flags |= pipe_config->adjusted_mode.flags;
6126 }
6127 
6128 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6129 {
6130         struct drm_device *dev = intel_crtc->base.dev;
6131         struct drm_i915_private *dev_priv = dev->dev_private;
6132         uint32_t pipeconf;
6133 
6134         pipeconf = 0;
6135 
6136         if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
6137             (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
6138                 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
6139 
6140         if (intel_crtc->config.double_wide)
6141                 pipeconf |= PIPECONF_DOUBLE_WIDE;
6142 
6143         /* only g4x and later have fancy bpc/dither controls */
6144         if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
6145                 /* Bspec claims that we can't use dithering for 30bpp pipes. */
6146                 if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
6147                         pipeconf |= PIPECONF_DITHER_EN |
6148                                     PIPECONF_DITHER_TYPE_SP;
6149 
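                /*
                 * pipe_bpp counts bits across all three colour channels, so
                 * the 18/24/30 values below correspond to 6/8/10 bits per
                 * channel in the PIPECONF BPC field.
                 */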
6150                 switch (intel_crtc->config.pipe_bpp) {
6151                 case 18:
6152                         pipeconf |= PIPECONF_6BPC;
6153                         break;
6154                 case 24:
6155                         pipeconf |= PIPECONF_8BPC;
6156                         break;
6157                 case 30:
6158                         pipeconf |= PIPECONF_10BPC;
6159                         break;
6160                 default:
6161                         /* Case prevented by intel_choose_pipe_bpp_dither. */
6162                         BUG();
6163                 }
6164         }
6165 
6166         if (HAS_PIPE_CXSR(dev)) {
6167                 if (intel_crtc->lowfreq_avail) {
6168                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6169                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6170                 } else {
6171                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6172                 }
6173         }
6174 
6175         if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6176                 if (INTEL_INFO(dev)->gen < 4 ||
6177                     intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
6178                         pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6179                 else
6180                         pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6181         } else
6182                 pipeconf |= PIPECONF_PROGRESSIVE;
6183 
6184         if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
6185                 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
6186 
6187         I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
6188         POSTING_READ(PIPECONF(intel_crtc->pipe));
6189 }
6190 
6191 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6192                               int x, int y,
6193                               struct drm_framebuffer *fb)
6194 {
6195         struct drm_device *dev = crtc->dev;
6196         struct drm_i915_private *dev_priv = dev->dev_private;
6197         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6198         int refclk, num_connectors = 0;
6199         intel_clock_t clock, reduced_clock;
6200         bool ok, has_reduced_clock = false;
6201         bool is_lvds = false, is_dsi = false;
6202         struct intel_encoder *encoder;
6203         const intel_limit_t *limit;
6204 
6205         for_each_encoder_on_crtc(dev, crtc, encoder) {
6206                 switch (encoder->type) {
6207                 case INTEL_OUTPUT_LVDS:
6208                         is_lvds = true;
6209                         break;
6210                 case INTEL_OUTPUT_DSI:
6211                         is_dsi = true;
6212                         break;
6213                 }
6214 
6215                 num_connectors++;
6216         }
6217 
6218         if (is_dsi)
6219                 return 0;
6220 
6221         if (!intel_crtc->config.clock_set) {
6222                 refclk = i9xx_get_refclk(crtc, num_connectors);
6223 
6224                 /*
6225                  * Returns a set of divisors for the desired target clock with
6226                  * the given refclk, or false.  The returned values represent
6227                  * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
6228                  * 2) / p1 / p2.
6229                  */
6230                 limit = intel_limit(crtc, refclk);
6231                 ok = dev_priv->display.find_dpll(limit, crtc,
6232                                                  intel_crtc->config.port_clock,
6233                                                  refclk, NULL, &clock);
6234                 if (!ok) {
6235                         DRM_ERROR("Couldn't find PLL settings for mode!\n");
6236                         return -EINVAL;
6237                 }
6238 
6239                 if (is_lvds && dev_priv->lvds_downclock_avail) {
6240                         /*
6241                          * Ensure we match the reduced clock's P to the target
6242                          * clock.  If the clocks don't match, we can't switch
6243                          * the display clock by using the FP0/FP1. In such case
6244                          * we will disable the LVDS downclock feature.
6245                          */
6246                         has_reduced_clock =
6247                                 dev_priv->display.find_dpll(limit, crtc,
6248                                                             dev_priv->lvds_downclock,
6249                                                             refclk, &clock,
6250                                                             &reduced_clock);
6251                 }
6252                 /* Compat-code for transition, will disappear. */
6253                 intel_crtc->config.dpll.n = clock.n;
6254                 intel_crtc->config.dpll.m1 = clock.m1;
6255                 intel_crtc->config.dpll.m2 = clock.m2;
6256                 intel_crtc->config.dpll.p1 = clock.p1;
6257                 intel_crtc->config.dpll.p2 = clock.p2;
6258         }
6259 
6260         if (IS_GEN2(dev)) {
6261                 i8xx_update_pll(intel_crtc,
6262                                 has_reduced_clock ? &reduced_clock : NULL,
6263                                 num_connectors);
6264         } else if (IS_CHERRYVIEW(dev)) {
6265                 chv_update_pll(intel_crtc);
6266         } else if (IS_VALLEYVIEW(dev)) {
6267                 vlv_update_pll(intel_crtc);
6268         } else {
6269                 i9xx_update_pll(intel_crtc,
6270                                 has_reduced_clock ? &reduced_clock : NULL,
6271                                 num_connectors);
6272         }
6273 
6274         return 0;
6275 }
6276 
6277 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
6278                                  struct intel_crtc_config *pipe_config)
6279 {
6280         struct drm_device *dev = crtc->base.dev;
6281         struct drm_i915_private *dev_priv = dev->dev_private;
6282         uint32_t tmp;
6283 
6284         if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
6285                 return;
6286 
6287         tmp = I915_READ(PFIT_CONTROL);
6288         if (!(tmp & PFIT_ENABLE))
6289                 return;
6290 
6291         /* Check whether the pfit is attached to our pipe. */
6292         if (INTEL_INFO(dev)->gen < 4) {
6293                 if (crtc->pipe != PIPE_B)
6294                         return;
6295         } else {
6296                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
6297                         return;
6298         }
6299 
6300         pipe_config->gmch_pfit.control = tmp;
6301         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
6302         if (INTEL_INFO(dev)->gen < 5)
6303                 pipe_config->gmch_pfit.lvds_border_bits =
6304                         I915_READ(LVDS) & LVDS_BORDER_ENABLE;
6305 }
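
/*
 * The pipe check in i9xx_get_pfit_config() reflects that before gen4 the
 * panel fitter is tied to pipe B and has no pipe-select field, whereas from
 * gen4 on PFIT_CONTROL records which pipe owns the fitter.
 */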
6306 
6307 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
6308                                struct intel_crtc_config *pipe_config)
6309 {
6310         struct drm_device *dev = crtc->base.dev;
6311         struct drm_i915_private *dev_priv = dev->dev_private;
6312         int pipe = pipe_config->cpu_transcoder;
6313         intel_clock_t clock;
6314         u32 mdiv;
6315         int refclk = 100000;
6316 
6317         /* In the MIPI (DSI) case the DPLL will not even be used */
6318         if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
6319                 return;
6320 
6321         mutex_lock(&dev_priv->dpio_lock);
6322         mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
6323         mutex_unlock(&dev_priv->dpio_lock);
6324 
6325         clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
6326         clock.m2 = mdiv & DPIO_M2DIV_MASK;
6327         clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
6328         clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
6329         clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
6330 
6331         vlv_clock(refclk, &clock);
6332 
6333         /* clock.dot is the fast clock */
6334         pipe_config->port_clock = clock.dot / 5;
6335 }
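
/*
 * Note on the division by 5 above: on VLV the PLL output ("fast clock")
 * runs at five times the port clock, so the dot clock computed from the
 * dividers is divided by 5 before being reported as port_clock.
 */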
6336 
6337 static void i9xx_get_plane_config(struct intel_crtc *crtc,
6338                                   struct intel_plane_config *plane_config)
6339 {
6340         struct drm_device *dev = crtc->base.dev;
6341         struct drm_i915_private *dev_priv = dev->dev_private;
6342         u32 val, base, offset;
6343         int pipe = crtc->pipe, plane = crtc->plane;
6344         int fourcc, pixel_format;
6345         int aligned_height;
6346 
6347         crtc->base.primary->fb = kzalloc(sizeof(struct intel_framebuffer), GFP_KERNEL);
6348         if (!crtc->base.primary->fb) {
6349                 DRM_DEBUG_KMS("failed to alloc fb\n");
6350                 return;
6351         }
6352 
6353         val = I915_READ(DSPCNTR(plane));
6354 
6355         if (INTEL_INFO(dev)->gen >= 4)
6356                 if (val & DISPPLANE_TILED)
6357                         plane_config->tiled = true;
6358 
6359         pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
6360         fourcc = intel_format_to_fourcc(pixel_format);
6361         crtc->base.primary->fb->pixel_format = fourcc;
6362         crtc->base.primary->fb->bits_per_pixel =
6363                 drm_format_plane_cpp(fourcc, 0) * 8;
6364 
6365         if (INTEL_INFO(dev)->gen >= 4) {
6366                 if (plane_config->tiled)
6367                         offset = I915_READ(DSPTILEOFF(plane));
6368                 else
6369                         offset = I915_READ(DSPLINOFF(plane));
6370                 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
6371         } else {
6372                 base = I915_READ(DSPADDR(plane));
6373         }
6374         plane_config->base = base;
6375 
6376         val = I915_READ(PIPESRC(pipe));
6377         crtc->base.primary->fb->width = ((val >> 16) & 0xfff) + 1;
6378         crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
6379 
6380         val = I915_READ(DSPSTRIDE(pipe));
6381         crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
6382 
6383         aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
6384                                             plane_config->tiled);
6385 
6386         plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
6387                                         aligned_height);
6388 
6389         DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
6390                       pipe, plane, crtc->base.primary->fb->width,
6391                       crtc->base.primary->fb->height,
6392                       crtc->base.primary->fb->bits_per_pixel, base,
6393                       crtc->base.primary->fb->pitches[0],
6394                       plane_config->size);
6395 
6396 }
6397 
6398 static void chv_crtc_clock_get(struct intel_crtc *crtc,
6399                                struct