Version:  2.0.40 2.2.26 2.4.37 3.9 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5 4.6

Linux/drivers/gpu/drm/i915/intel_display.c

  1 /*
  2  * Copyright © 2006-2007 Intel Corporation
  3  *
  4  * Permission is hereby granted, free of charge, to any person obtaining a
  5  * copy of this software and associated documentation files (the "Software"),
  6  * to deal in the Software without restriction, including without limitation
  7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8  * and/or sell copies of the Software, and to permit persons to whom the
  9  * Software is furnished to do so, subject to the following conditions:
 10  *
 11  * The above copyright notice and this permission notice (including the next
 12  * paragraph) shall be included in all copies or substantial portions of the
 13  * Software.
 14  *
 15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 21  * DEALINGS IN THE SOFTWARE.
 22  *
 23  * Authors:
 24  *      Eric Anholt <eric@anholt.net>
 25  */
 26 
 27 #include <linux/dmi.h>
 28 #include <linux/module.h>
 29 #include <linux/input.h>
 30 #include <linux/i2c.h>
 31 #include <linux/kernel.h>
 32 #include <linux/slab.h>
 33 #include <linux/vgaarb.h>
 34 #include <drm/drm_edid.h>
 35 #include <drm/drmP.h>
 36 #include "intel_drv.h"
 37 #include <drm/i915_drm.h>
 38 #include "i915_drv.h"
 39 #include "i915_trace.h"
 40 #include <drm/drm_atomic.h>
 41 #include <drm/drm_atomic_helper.h>
 42 #include <drm/drm_dp_helper.h>
 43 #include <drm/drm_crtc_helper.h>
 44 #include <drm/drm_plane_helper.h>
 45 #include <drm/drm_rect.h>
 46 #include <linux/dma_remapping.h>
 47 #include <linux/reservation.h>
 48 #include <linux/dma-buf.h>
 49 
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for SKL+: adds alpha and packed YUV variants */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
 87 
 88 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
 89                                 struct intel_crtc_state *pipe_config);
 90 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 91                                    struct intel_crtc_state *pipe_config);
 92 
 93 static int intel_framebuffer_init(struct drm_device *dev,
 94                                   struct intel_framebuffer *ifb,
 95                                   struct drm_mode_fb_cmd2 *mode_cmd,
 96                                   struct drm_i915_gem_object *obj);
 97 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
 98 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
 99 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
100                                          struct intel_link_m_n *m_n,
101                                          struct intel_link_m_n *m2_n2);
102 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
103 static void haswell_set_pipeconf(struct drm_crtc *crtc);
104 static void intel_set_pipe_csc(struct drm_crtc *crtc);
105 static void vlv_prepare_pll(struct intel_crtc *crtc,
106                             const struct intel_crtc_state *pipe_config);
107 static void chv_prepare_pll(struct intel_crtc *crtc,
108                             const struct intel_crtc_state *pipe_config);
109 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
110 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
111 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
112         struct intel_crtc_state *crtc_state);
113 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
114                            int num_connectors);
115 static void skylake_pfit_enable(struct intel_crtc *crtc);
116 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
117 static void ironlake_pfit_enable(struct intel_crtc *crtc);
118 static void intel_modeset_setup_hw_state(struct drm_device *dev);
119 static void intel_pre_disable_primary(struct drm_crtc *crtc);
120 
/* Inclusive [min, max] range for a single PLL divider/parameter. */
typedef struct {
	int	min, max;
} intel_range_t;

/* Post (p2) divider selection: p2_slow below dot_limit, p2_fast above. */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
/* Full set of PLL divider limits for one platform/output combination. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};
135 
136 /* returns HPLL frequency in kHz */
137 static int valleyview_get_vco(struct drm_i915_private *dev_priv)
138 {
139         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
140 
141         /* Obtain SKU information */
142         mutex_lock(&dev_priv->sb_lock);
143         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
144                 CCK_FUSE_HPLL_FREQ_MASK;
145         mutex_unlock(&dev_priv->sb_lock);
146 
147         return vco_freq[hpll_freq] * 1000;
148 }
149 
150 static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
151                                   const char *name, u32 reg)
152 {
153         u32 val;
154         int divider;
155 
156         if (dev_priv->hpll_freq == 0)
157                 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
158 
159         mutex_lock(&dev_priv->sb_lock);
160         val = vlv_cck_read(dev_priv, reg);
161         mutex_unlock(&dev_priv->sb_lock);
162 
163         divider = val & CCK_FREQUENCY_VALUES;
164 
165         WARN((val & CCK_FREQUENCY_STATUS) !=
166              (divider << CCK_FREQUENCY_STATUS_SHIFT),
167              "%s change in progress\n", name);
168 
169         return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
170 }
171 
/* Return the PCH raw clock frequency field from PCH_RAWCLK_FREQ. */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Rawclk is only meaningful on PCH-split platforms. */
	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
181 
/* hrawclock is 1/4 the FSB frequency */
int intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return 200;

	/* Map the FSB frequency strap to hrawclk in MHz (FSB/4). */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown strap: fall back to the same value as 533 FSB. */
		return 133;
	}
}
214 
215 static void intel_update_czclk(struct drm_i915_private *dev_priv)
216 {
217         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
218                 return;
219 
220         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
221                                                       CCK_CZ_CLOCK_CONTROL);
222 
223         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
224 }
225 
226 static inline u32 /* units of 100MHz */
227 intel_fdi_link_freq(struct drm_device *dev)
228 {
229         if (IS_GEN5(dev)) {
230                 struct drm_i915_private *dev_priv = dev->dev_private;
231                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
232         } else
233                 return 27;
234 }
235 
/* gen2 DAC (CRT) PLL limits */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 DVO PLL limits: like DAC but p2 stays at 4 above dot_limit */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 LVDS PLL limits */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* i9xx (gen3/4, non-G4X) SDVO/DAC PLL limits */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx (gen3/4, non-G4X) LVDS PLL limits */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
300 
301 
/* G4X SDVO PLL limits */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4X HDMI (and analog) PLL limits */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4X single-channel LVDS PLL limits */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4X dual-channel LVDS PLL limits */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview SDVO PLL limits */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview LVDS PLL limits */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
385 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB single-channel LVDS limits */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS limits */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB dual-channel LVDS limits with 100 MHz refclk */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
456 
static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in 10.22 fixed point on CHV */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
500 
/* Thin wrapper for readability at the call sites. */
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
506 
507 /**
508  * Returns whether any output on the specified pipe is of the specified type
509  */
510 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
511 {
512         struct drm_device *dev = crtc->base.dev;
513         struct intel_encoder *encoder;
514 
515         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
516                 if (encoder->type == type)
517                         return true;
518 
519         return false;
520 }
521 
522 /**
523  * Returns whether any output on the specified pipe will have the specified
524  * type after a staged modeset is complete, i.e., the same as
525  * intel_pipe_has_type() but looking at encoder->new_crtc instead of
526  * encoder->crtc.
527  */
528 static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
529                                       int type)
530 {
531         struct drm_atomic_state *state = crtc_state->base.state;
532         struct drm_connector *connector;
533         struct drm_connector_state *connector_state;
534         struct intel_encoder *encoder;
535         int i, num_connectors = 0;
536 
537         for_each_connector_in_state(state, connector, connector_state, i) {
538                 if (connector_state->crtc != crtc_state->base.crtc)
539                         continue;
540 
541                 num_connectors++;
542 
543                 encoder = to_intel_encoder(connector_state->best_encoder);
544                 if (encoder->type == type)
545                         return true;
546         }
547 
548         WARN_ON(num_connectors == 0);
549 
550         return false;
551 }
552 
553 static const intel_limit_t *
554 intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
555 {
556         struct drm_device *dev = crtc_state->base.crtc->dev;
557         const intel_limit_t *limit;
558 
559         if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
560                 if (intel_is_dual_link_lvds(dev)) {
561                         if (refclk == 100000)
562                                 limit = &intel_limits_ironlake_dual_lvds_100m;
563                         else
564                                 limit = &intel_limits_ironlake_dual_lvds;
565                 } else {
566                         if (refclk == 100000)
567                                 limit = &intel_limits_ironlake_single_lvds_100m;
568                         else
569                                 limit = &intel_limits_ironlake_single_lvds;
570                 }
571         } else
572                 limit = &intel_limits_ironlake_dac;
573 
574         return limit;
575 }
576 
577 static const intel_limit_t *
578 intel_g4x_limit(struct intel_crtc_state *crtc_state)
579 {
580         struct drm_device *dev = crtc_state->base.crtc->dev;
581         const intel_limit_t *limit;
582 
583         if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
584                 if (intel_is_dual_link_lvds(dev))
585                         limit = &intel_limits_g4x_dual_channel_lvds;
586                 else
587                         limit = &intel_limits_g4x_single_channel_lvds;
588         } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
589                    intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
590                 limit = &intel_limits_g4x_hdmi;
591         } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
592                 limit = &intel_limits_g4x_sdvo;
593         } else /* The option is for other outputs */
594                 limit = &intel_limits_i9xx_sdvo;
595 
596         return limit;
597 }
598 
599 static const intel_limit_t *
600 intel_limit(struct intel_crtc_state *crtc_state, int refclk)
601 {
602         struct drm_device *dev = crtc_state->base.crtc->dev;
603         const intel_limit_t *limit;
604 
605         if (IS_BROXTON(dev))
606                 limit = &intel_limits_bxt;
607         else if (HAS_PCH_SPLIT(dev))
608                 limit = intel_ironlake_limit(crtc_state, refclk);
609         else if (IS_G4X(dev)) {
610                 limit = intel_g4x_limit(crtc_state);
611         } else if (IS_PINEVIEW(dev)) {
612                 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
613                         limit = &intel_limits_pineview_lvds;
614                 else
615                         limit = &intel_limits_pineview_sdvo;
616         } else if (IS_CHERRYVIEW(dev)) {
617                 limit = &intel_limits_chv;
618         } else if (IS_VALLEYVIEW(dev)) {
619                 limit = &intel_limits_vlv;
620         } else if (!IS_GEN2(dev)) {
621                 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
622                         limit = &intel_limits_i9xx_lvds;
623                 else
624                         limit = &intel_limits_i9xx_sdvo;
625         } else {
626                 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
627                         limit = &intel_limits_i8xx_lvds;
628                 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
629                         limit = &intel_limits_i8xx_dvo;
630                 else
631                         limit = &intel_limits_i8xx_dac;
632         }
633         return limit;
634 }
635 
636 /*
637  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
638  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
639  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
640  * The helpers' return value is the rate of the clock that is fed to the
641  * display engine's pipe which can be the above fast dot clock rate or a
642  * divided-down version of it.
643  */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	/* Derive the effective M and P from the raw divider fields. */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* Guard against divide-by-zero from bogus divider values. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
656 
/* Effective M = 5 * (M1 + 2) + (M2 + 2) on i9xx-style DPLLs. */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
661 
/* Compute VCO and dot clock for i9xx-style DPLLs; N uses the +2 encoding. */
static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* Guard against divide-by-zero from bogus divider values. */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
673 
/*
 * Compute VCO and dot clock for VLV DPLLs.  The computed dot clock is the
 * 5x fast clock (see intel_limits_vlv), so the pipe rate is dot / 5.
 */
static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
685 
/*
 * Compute VCO and dot clock for CHV DPLLs.  m2 is in 10.22 fixed point
 * (see intel_limits_chv), hence the 64-bit multiply and the n << 22
 * divisor.  As on VLV the dot clock is the 5x fast clock.
 */
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
698 
/* Reject helper: bails out of the enclosing validity check. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	/* Every raw divider must fall within the platform's limit ranges. */
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* The m1 > m2 ordering rule does not apply on PNV/VLV/CHV/BXT. */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT don't constrain the derived m and p values. */
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
740 
741 static int
742 i9xx_select_p2_div(const intel_limit_t *limit,
743                    const struct intel_crtc_state *crtc_state,
744                    int target)
745 {
746         struct drm_device *dev = crtc_state->base.crtc->dev;
747 
748         if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
749                 /*
750                  * For LVDS just rely on its current settings for dual-channel.
751                  * We haven't figured out how to reliably set up different
752                  * single/dual channel state, if we even can.
753                  */
754                 if (intel_is_dual_link_lvds(dev))
755                         return limit->p2.p2_fast;
756                 else
757                         return limit->p2.p2_slow;
758         } else {
759                 if (target < limit->p2.dot_limit)
760                         return limit->p2.p2_slow;
761                 else
762                         return limit->p2.p2_fast;
763         }
764 }
765 
/*
 * Exhaustively search the divider space for the combination whose resulting
 * dot clock is closest to @target, storing it in @best_clock.  If
 * @match_clock is given, only combinations with the same post divider p are
 * considered.  Returns true if any valid combination was found.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;	/* best error so far; start at worst case */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must stay strictly greater than m2 here. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only improved below target if something valid was found. */
	return (err != target);
}
812 
/*
 * pnv_find_best_dpll - Pineview variant of i9xx_find_best_dpll().
 *
 * Identical search strategy, except the Pineview divisor formula
 * (pnv_calc_dpll_params()) is used and there is no m2 < m1 restriction.
 * See i9xx_find_best_dpll() for the parameter/return conventions.
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	/* best error so far; seeded so that any valid clock improves on it */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
857 
/*
 * g4x_find_best_dpll - G4x variant of the DPLL divisor search.
 *
 * Unlike i9xx_find_best_dpll() this sweeps each range from its
 * hardware-preferred end (small n, large m1/m2/p1) and accepts any dot
 * clock within a fixed error budget rather than requiring the error to
 * beat @target.  @match_clock is not used by this variant.
 *
 * Returns true and fills @best_clock if a configuration within the
 * error budget was found.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* narrow the remaining search to n values
						 * no larger than this hit (prefer small n) */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
905 
906 /*
907  * Check if the calculated PLL configuration is more optimal compared to the
908  * best configuration and error found so far. Return the calculated error.
909  */
910 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
911                                const intel_clock_t *calculated_clock,
912                                const intel_clock_t *best_clock,
913                                unsigned int best_error_ppm,
914                                unsigned int *error_ppm)
915 {
916         /*
917          * For CHV ignore the error and consider only the P value.
918          * Prefer a bigger P value based on HW requirements.
919          */
920         if (IS_CHERRYVIEW(dev)) {
921                 *error_ppm = 0;
922 
923                 return calculated_clock->p > best_clock->p;
924         }
925 
926         if (WARN_ON_ONCE(!target_freq))
927                 return false;
928 
929         *error_ppm = div_u64(1000000ULL *
930                                 abs(target_freq - calculated_clock->dot),
931                              target_freq);
932         /*
933          * Prefer a better P value over a better (smaller) error if the error
934          * is small. Ensure this preference for future configurations too by
935          * setting the error to 0.
936          */
937         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
938                 *error_ppm = 0;
939 
940                 return true;
941         }
942 
943         return *error_ppm + 10 < best_error_ppm;
944 }
945 
/*
 * vlv_find_best_dpll - VLV variant of the DPLL divisor search.
 *
 * Sweeps n/p1/p2/m1 and derives m2 directly from the target instead of
 * iterating it.  Candidates are ranked by vlv_PLL_is_optimal() on their
 * ppm error.  @match_clock is not used by this variant.
 *
 * Returns true and fills @best_clock if a valid configuration was found.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* compute m2 directly for this n/p/m1 choice */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
999 
1000 static bool
1001 chv_find_best_dpll(const intel_limit_t *limit,
1002                    struct intel_crtc_state *crtc_state,
1003                    int target, int refclk, intel_clock_t *match_clock,
1004                    intel_clock_t *best_clock)
1005 {
1006         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1007         struct drm_device *dev = crtc->base.dev;
1008         unsigned int best_error_ppm;
1009         intel_clock_t clock;
1010         uint64_t m2;
1011         int found = false;
1012 
1013         memset(best_clock, 0, sizeof(*best_clock));
1014         best_error_ppm = 1000000;
1015 
1016         /*
1017          * Based on hardware doc, the n always set to 1, and m1 always
1018          * set to 2.  If requires to support 200Mhz refclk, we need to
1019          * revisit this because n may not 1 anymore.
1020          */
1021         clock.n = 1, clock.m1 = 2;
1022         target *= 5;    /* fast clock */
1023 
1024         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1025                 for (clock.p2 = limit->p2.p2_fast;
1026                                 clock.p2 >= limit->p2.p2_slow;
1027                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1028                         unsigned int error_ppm;
1029 
1030                         clock.p = clock.p1 * clock.p2;
1031 
1032                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1033                                         clock.n) << 22, refclk * clock.m1);
1034 
1035                         if (m2 > INT_MAX/clock.m1)
1036                                 continue;
1037 
1038                         clock.m2 = m2;
1039 
1040                         chv_calc_dpll_params(refclk, &clock);
1041 
1042                         if (!intel_PLL_is_valid(dev, limit, &clock))
1043                                 continue;
1044 
1045                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1046                                                 best_error_ppm, &error_ppm))
1047                                 continue;
1048 
1049                         *best_clock = clock;
1050                         best_error_ppm = error_ppm;
1051                         found = true;
1052                 }
1053         }
1054 
1055         return found;
1056 }
1057 
1058 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1059                         intel_clock_t *best_clock)
1060 {
1061         int refclk = i9xx_get_refclk(crtc_state, 0);
1062 
1063         return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1064                                   target_clock, refclk, NULL, best_clock);
1065 }
1066 
/*
 * intel_crtc_active - report whether @crtc is currently active.
 *
 * Active means: the software active flag is set, the primary plane has
 * a framebuffer attached, and the adjusted mode has a non-zero clock.
 */
bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}
1087 
1088 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1089                                              enum pipe pipe)
1090 {
1091         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1092         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1093 
1094         return intel_crtc->config->cpu_transcoder;
1095 }
1096 
1097 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1098 {
1099         struct drm_i915_private *dev_priv = dev->dev_private;
1100         i915_reg_t reg = PIPEDSL(pipe);
1101         u32 line1, line2;
1102         u32 line_mask;
1103 
1104         if (IS_GEN2(dev))
1105                 line_mask = DSL_LINEMASK_GEN2;
1106         else
1107                 line_mask = DSL_LINEMASK_GEN3;
1108 
1109         line1 = I915_READ(reg) & line_mask;
1110         msleep(5);
1111         line2 = I915_READ(reg) & line_mask;
1112 
1113         return line1 == line2;
1114 }
1115 
1116 /*
1117  * intel_wait_for_pipe_off - wait for pipe to turn off
1118  * @crtc: crtc whose pipe to wait for
1119  *
1120  * After disabling a pipe, we can't wait for vblank in the usual way,
1121  * spinning on the vblank interrupt status bit, since we won't actually
1122  * see an interrupt when the pipe is disabled.
1123  *
1124  * On Gen4 and above:
1125  *   wait for the pipe register state bit to turn off
1126  *
1127  * Otherwise:
1128  *   wait for the display line value to settle (it usually
1129  *   ends up stopping at the start of the next frame).
1130  *
1131  */
1132 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1133 {
1134         struct drm_device *dev = crtc->base.dev;
1135         struct drm_i915_private *dev_priv = dev->dev_private;
1136         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1137         enum pipe pipe = crtc->pipe;
1138 
1139         if (INTEL_INFO(dev)->gen >= 4) {
1140                 i915_reg_t reg = PIPECONF(cpu_transcoder);
1141 
1142                 /* Wait for the Pipe State to go off */
1143                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1144                              100))
1145                         WARN(1, "pipe_off wait timed out\n");
1146         } else {
1147                 /* Wait for the display line to settle */
1148                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1149                         WARN(1, "pipe_off wait timed out\n");
1150         }
1151 }
1152 
1153 /* Only for pre-ILK configs */
1154 void assert_pll(struct drm_i915_private *dev_priv,
1155                 enum pipe pipe, bool state)
1156 {
1157         u32 val;
1158         bool cur_state;
1159 
1160         val = I915_READ(DPLL(pipe));
1161         cur_state = !!(val & DPLL_VCO_ENABLE);
1162         I915_STATE_WARN(cur_state != state,
1163              "PLL state assertion failure (expected %s, current %s)\n",
1164                         onoff(state), onoff(cur_state));
1165 }
1166 
1167 /* XXX: the dsi pll is shared between MIPI DSI ports */
1168 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1169 {
1170         u32 val;
1171         bool cur_state;
1172 
1173         mutex_lock(&dev_priv->sb_lock);
1174         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1175         mutex_unlock(&dev_priv->sb_lock);
1176 
1177         cur_state = val & DSI_PLL_VCO_EN;
1178         I915_STATE_WARN(cur_state != state,
1179              "DSI PLL state assertion failure (expected %s, current %s)\n",
1180                         onoff(state), onoff(cur_state));
1181 }
1182 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1183 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1184 
/*
 * intel_crtc_to_shared_dpll - return the shared DPLL assigned to @crtc's
 * current config, or NULL if none is in use.
 */
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	/* a negative shared_dpll id means no shared DPLL is assigned */
	if (crtc->config->shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
}
1195 
/* For ILK+ */
/*
 * Warn if @pll's hardware enable state (as reported by its get_hw_state
 * hook) does not match the expected @state.  A NULL @pll only warns.
 */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->name, onoff(state), onoff(cur_state));
}
1212 
1213 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1214                           enum pipe pipe, bool state)
1215 {
1216         bool cur_state;
1217         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1218                                                                       pipe);
1219 
1220         if (HAS_DDI(dev_priv->dev)) {
1221                 /* DDI does not have a specific FDI_TX register */
1222                 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1223                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1224         } else {
1225                 u32 val = I915_READ(FDI_TX_CTL(pipe));
1226                 cur_state = !!(val & FDI_TX_ENABLE);
1227         }
1228         I915_STATE_WARN(cur_state != state,
1229              "FDI TX state assertion failure (expected %s, current %s)\n",
1230                         onoff(state), onoff(cur_state));
1231 }
1232 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1233 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1234 
1235 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1236                           enum pipe pipe, bool state)
1237 {
1238         u32 val;
1239         bool cur_state;
1240 
1241         val = I915_READ(FDI_RX_CTL(pipe));
1242         cur_state = !!(val & FDI_RX_ENABLE);
1243         I915_STATE_WARN(cur_state != state,
1244              "FDI RX state assertion failure (expected %s, current %s)\n",
1245                         onoff(state), onoff(cur_state));
1246 }
1247 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1248 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1249 
1250 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1251                                       enum pipe pipe)
1252 {
1253         u32 val;
1254 
1255         /* ILK FDI PLL is always enabled */
1256         if (INTEL_INFO(dev_priv->dev)->gen == 5)
1257                 return;
1258 
1259         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1260         if (HAS_DDI(dev_priv->dev))
1261                 return;
1262 
1263         val = I915_READ(FDI_TX_CTL(pipe));
1264         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1265 }
1266 
1267 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1268                        enum pipe pipe, bool state)
1269 {
1270         u32 val;
1271         bool cur_state;
1272 
1273         val = I915_READ(FDI_RX_CTL(pipe));
1274         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1275         I915_STATE_WARN(cur_state != state,
1276              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1277                         onoff(state), onoff(cur_state));
1278 }
1279 
/*
 * assert_panel_unlocked - warn if the panel power sequencer registers
 * for the panel on @pipe are still write-locked.
 *
 * Picks the platform-specific PP control register and works out which
 * pipe the panel is attached to before checking the lock bits.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* DDI platforms are not handled here */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		/* panel pipe follows the LVDS port's pipe select */
		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	/* unlocked means: panel power off, or the unlock pattern is set */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1321 
1322 static void assert_cursor(struct drm_i915_private *dev_priv,
1323                           enum pipe pipe, bool state)
1324 {
1325         struct drm_device *dev = dev_priv->dev;
1326         bool cur_state;
1327 
1328         if (IS_845G(dev) || IS_I865G(dev))
1329                 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1330         else
1331                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1332 
1333         I915_STATE_WARN(cur_state != state,
1334              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1335                         pipe_name(pipe), onoff(state), onoff(cur_state));
1336 }
1337 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1338 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1339 
/*
 * assert_pipe - warn if the pipe enable state doesn't match @state.
 *
 * Only touches the PIPECONF register when the transcoder's power domain
 * can be grabbed; otherwise the pipe is reported as disabled without a
 * register read.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		/* balance the power reference taken above */
		intel_display_power_put(dev_priv, power_domain);
	} else {
		/* power domain off: treat the pipe as disabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1367 
1368 static void assert_plane(struct drm_i915_private *dev_priv,
1369                          enum plane plane, bool state)
1370 {
1371         u32 val;
1372         bool cur_state;
1373 
1374         val = I915_READ(DSPCNTR(plane));
1375         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1376         I915_STATE_WARN(cur_state != state,
1377              "plane %c assertion failure (expected %s, current %s)\n",
1378                         plane_name(plane), onoff(state), onoff(cur_state));
1379 }
1380 
1381 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1382 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1383 
/*
 * assert_planes_disabled - warn if any primary plane that could scan out
 * on @pipe is still enabled.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	/* pre-gen4: any plane can be routed to any pipe via its pipe select */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1409 
/*
 * assert_sprites_disabled - warn if any sprite plane on @pipe is still
 * enabled.  Each hardware generation exposes its sprites through a
 * different register block, hence the cascade.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9+: universal planes via PLANE_CTL */
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* VLV/CHV: multiple sprites per pipe via SPCNTR */
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		/* gen7/8: one sprite per pipe via SPRCTL */
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		/* gen5/6: one sprite per pipe via DVSCNTR */
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1442 
/*
 * assert_vblank_disabled - warn if vblank interrupts could still be
 * taken for @crtc.
 *
 * NOTE(review): this expects drm_crtc_vblank_get() to fail while vblank
 * support is off for the crtc; if it succeeds (returns 0) we warn and
 * drop the reference we just took -- confirm against drm_vblank docs.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1448 
1449 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1450 {
1451         u32 val;
1452         bool enabled;
1453 
1454         I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1455 
1456         val = I915_READ(PCH_DREF_CONTROL);
1457         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1458                             DREF_SUPERSPREAD_SOURCE_MASK));
1459         I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1460 }
1461 
1462 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1463                                            enum pipe pipe)
1464 {
1465         u32 val;
1466         bool enabled;
1467 
1468         val = I915_READ(PCH_TRANSCONF(pipe));
1469         enabled = !!(val & TRANS_ENABLE);
1470         I915_STATE_WARN(enabled,
1471              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1472              pipe_name(pipe));
1473 }
1474 
1475 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1476                             enum pipe pipe, u32 port_sel, u32 val)
1477 {
1478         if ((val & DP_PORT_EN) == 0)
1479                 return false;
1480 
1481         if (HAS_PCH_CPT(dev_priv->dev)) {
1482                 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1483                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1484                         return false;
1485         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1486                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1487                         return false;
1488         } else {
1489                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1490                         return false;
1491         }
1492         return true;
1493 }
1494 
1495 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1496                               enum pipe pipe, u32 val)
1497 {
1498         if ((val & SDVO_ENABLE) == 0)
1499                 return false;
1500 
1501         if (HAS_PCH_CPT(dev_priv->dev)) {
1502                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1503                         return false;
1504         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1505                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1506                         return false;
1507         } else {
1508                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1509                         return false;
1510         }
1511         return true;
1512 }
1513 
1514 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1515                               enum pipe pipe, u32 val)
1516 {
1517         if ((val & LVDS_PORT_EN) == 0)
1518                 return false;
1519 
1520         if (HAS_PCH_CPT(dev_priv->dev)) {
1521                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1522                         return false;
1523         } else {
1524                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1525                         return false;
1526         }
1527         return true;
1528 }
1529 
1530 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1531                               enum pipe pipe, u32 val)
1532 {
1533         if ((val & ADPA_DAC_ENABLE) == 0)
1534                 return false;
1535         if (HAS_PCH_CPT(dev_priv->dev)) {
1536                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1537                         return false;
1538         } else {
1539                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1540                         return false;
1541         }
1542         return true;
1543 }
1544 
/*
 * Warn if the PCH DP port at @reg is enabled and driving transcoder
 * @pipe (it must be off before the transcoder is disabled).  @port_sel
 * is the expected TRANS_DP_PORT_SEL value for this port.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	/* On IBX even a disabled port must not be left selecting
	 * transcoder B. */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1558 
/*
 * Warn if the PCH HDMI port at @reg is enabled and driving transcoder
 * @pipe (it must be off before the transcoder is disabled).
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	/* On IBX even a disabled port must not be left selecting
	 * transcoder B. */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1571 
/*
 * Warn if any PCH port (DP B/C/D, VGA, LVDS, HDMI B/C/D) is still
 * enabled on transcoder @pipe.  Called before disabling the PCH
 * transcoder, which requires all of its ports to be off.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
1595 
/*
 * vlv_enable_pll - enable the pipe DPLL on Valleyview.
 *
 * Programs the precomputed DPLL value from @pipe_config, waits (1 ms
 * timeout) for the PLL lock bit, then writes DPLL_MD.  The pipe must
 * already be disabled.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1631 
/*
 * chv_enable_pll - enable the pipe DPLL on Cherryview.
 *
 * First enables the 10bit (dclkp) clock to the display controller via
 * the DPIO sideband (protected by sb_lock), then enables the PLL and
 * waits (1 ms timeout) for the lock bit, then programs DPLL_MD.
 * The pipe must already be disabled.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);

	/* not sure when this should be written */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1668 
1669 static int intel_num_dvo_pipes(struct drm_device *dev)
1670 {
1671         struct intel_crtc *crtc;
1672         int count = 0;
1673 
1674         for_each_intel_crtc(dev, crtc)
1675                 count += crtc->base.state->active &&
1676                         intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1677 
1678         return count;
1679 }
1680 
/*
 * i9xx_enable_pll - enable the pipe DPLL on pre-ILK platforms.
 *
 * Programs the DPLL value from crtc->config, including the i830 DVO 2x
 * clock workaround and the VGA-mode-first programming sequence noted
 * below.  The pipe must already be disabled.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1746 
1747 /**
1748  * i9xx_disable_pll - disable a PLL
1749  * @dev_priv: i915 private structure
1750  * @pipe: pipe PLL to disable
1751  *
1752  * Disable the PLL for @pipe, making sure the pipe is off first.
1753  *
1754  * Note!  This is for pre-ILK only.
1755  */
1756 static void i9xx_disable_pll(struct intel_crtc *crtc)
1757 {
1758         struct drm_device *dev = crtc->base.dev;
1759         struct drm_i915_private *dev_priv = dev->dev_private;
1760         enum pipe pipe = crtc->pipe;
1761 
1762         /* Disable DVO 2x clock on both PLLs if necessary */
1763         if (IS_I830(dev) &&
1764             intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1765             !intel_num_dvo_pipes(dev)) {
1766                 I915_WRITE(DPLL(PIPE_B),
1767                            I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1768                 I915_WRITE(DPLL(PIPE_A),
1769                            I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1770         }
1771 
1772         /* Don't disable pipe or pipe PLLs if needed */
1773         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1774             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1775                 return;
1776 
1777         /* Make sure the pipe isn't still relying on us */
1778         assert_pipe_disabled(dev_priv, pipe);
1779 
1780         I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1781         POSTING_READ(DPLL(pipe));
1782 }
1783 
1784 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1785 {
1786         u32 val;
1787 
1788         /* Make sure the pipe isn't still relying on us */
1789         assert_pipe_disabled(dev_priv, pipe);
1790 
1791         /*
1792          * Leave integrated clock source and reference clock enabled for pipe B.
1793          * The latter is needed for VGA hotplug / manual detection.
1794          */
1795         val = DPLL_VGA_MODE_DIS;
1796         if (pipe == PIPE_B)
1797                 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
1798         I915_WRITE(DPLL(pipe), val);
1799         POSTING_READ(DPLL(pipe));
1800 
1801 }
1802 
/*
 * Disable the pipe DPLL on Cherryview: clear the PLL enable bit, then
 * turn off the 10bit (dclkp) clock to the display controller via the
 * DPIO sideband (protected by sb_lock).  The pipe must be off already.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1828 
/*
 * vlv_wait_port_ready - wait for a VLV/CHV digital port to become ready.
 * @dport: the digital port to wait on
 * @expected_mask: ready bits expected within the port's ready field
 *
 * Polls (1000 ms timeout) the DPLL/PHY status register appropriate for
 * the port.  Note port C's ready field sits 4 bits above port B's in
 * the same register, hence the shift of @expected_mask.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1858 
/*
 * Run the shared DPLL's mode_set hook for @crtc's PLL, but only when no
 * other crtc has the PLL active yet (active == 0); an active PLL must
 * not be reprogrammed.
 */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	WARN_ON(!pll->config.crtc_mask);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}
1877 
1878 /**
1879  * intel_enable_shared_dpll - enable PCH PLL
1880  * @dev_priv: i915 private structure
1881  * @pipe: pipe PLL to enable
1882  *
1883  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1884  * drives the transcoder clock.
1885  */
1886 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1887 {
1888         struct drm_device *dev = crtc->base.dev;
1889         struct drm_i915_private *dev_priv = dev->dev_private;
1890         struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1891 
1892         if (WARN_ON(pll == NULL))
1893                 return;
1894 
1895         if (WARN_ON(pll->config.crtc_mask == 0))
1896                 return;
1897 
1898         DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1899                       pll->name, pll->active, pll->on,
1900                       crtc->base.base.id);
1901 
1902         if (pll->active++) {
1903                 WARN_ON(!pll->on);
1904                 assert_shared_dpll_enabled(dev_priv, pll);
1905                 return;
1906         }
1907         WARN_ON(pll->on);
1908 
1909         intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1910 
1911         DRM_DEBUG_KMS("enabling %s\n", pll->name);
1912         pll->enable(dev_priv, pll);
1913         pll->on = true;
1914 }
1915 
/*
 * Drop @crtc's reference on its shared DPLL; the hardware disable hook
 * only runs on the 1 -> 0 transition of pll->active.  Also releases the
 * PLLS power domain taken at enable time.
 */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	if (pll == NULL)
		return;

	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Refcount underflow: nothing left to disable. */
	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
1952 
1953 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1954                                            enum pipe pipe)
1955 {
1956         struct drm_device *dev = dev_priv->dev;
1957         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1958         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1959         i915_reg_t reg;
1960         uint32_t val, pipeconf_val;
1961 
1962         /* PCH only available on ILK+ */
1963         BUG_ON(!HAS_PCH_SPLIT(dev));
1964 
1965         /* Make sure PCH DPLL is enabled */
1966         assert_shared_dpll_enabled(dev_priv,
1967                                    intel_crtc_to_shared_dpll(intel_crtc));
1968 
1969         /* FDI must be feeding us bits for PCH ports */
1970         assert_fdi_tx_enabled(dev_priv, pipe);
1971         assert_fdi_rx_enabled(dev_priv, pipe);
1972 
1973         if (HAS_PCH_CPT(dev)) {
1974                 /* Workaround: Set the timing override bit before enabling the
1975                  * pch transcoder. */
1976                 reg = TRANS_CHICKEN2(pipe);
1977                 val = I915_READ(reg);
1978                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1979                 I915_WRITE(reg, val);
1980         }
1981 
1982         reg = PCH_TRANSCONF(pipe);
1983         val = I915_READ(reg);
1984         pipeconf_val = I915_READ(PIPECONF(pipe));
1985 
1986         if (HAS_PCH_IBX(dev_priv->dev)) {
1987                 /*
1988                  * Make the BPC in transcoder be consistent with
1989                  * that in pipeconf reg. For HDMI we must use 8bpc
1990                  * here for both 8bpc and 12bpc.
1991                  */
1992                 val &= ~PIPECONF_BPC_MASK;
1993                 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1994                         val |= PIPECONF_8BPC;
1995                 else
1996                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1997         }
1998 
1999         val &= ~TRANS_INTERLACE_MASK;
2000         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
2001                 if (HAS_PCH_IBX(dev_priv->dev) &&
2002                     intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
2003                         val |= TRANS_LEGACY_INTERLACED_ILK;
2004                 else
2005                         val |= TRANS_INTERLACED;
2006         else
2007                 val |= TRANS_PROGRESSIVE;
2008 
2009         I915_WRITE(reg, val | TRANS_ENABLE);
2010         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2011                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2012 }
2013 
/*
 * Enable the (single) LPT PCH transcoder for @cpu_transcoder.  Applies
 * the timing override workaround, copies the interlace setting from
 * PIPECONF, then enables the transcoder and waits (100 ms timeout) for
 * the enabled state.  FDI must already be running.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
2044 
/*
 * Disable the PCH transcoder for @pipe and wait (50 ms timeout) for it
 * to report disabled.  FDI and all PCH ports must already be off
 * (asserted below).  On CPT the timing override workaround bit is
 * cleared again afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
2075 
/*
 * Disable the LPT PCH transcoder, wait (50 ms timeout) for it to report
 * disabled, then clear the timing override workaround bit.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
2092 
2093 /**
2094  * intel_enable_pipe - enable a pipe, asserting requirements
2095  * @crtc: crtc responsible for the pipe
2096  *
2097  * Enable @crtc's pipe, making sure that various hardware specific requirements
2098  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2099  */
2100 static void intel_enable_pipe(struct intel_crtc *crtc)
2101 {
2102         struct drm_device *dev = crtc->base.dev;
2103         struct drm_i915_private *dev_priv = dev->dev_private;
2104         enum pipe pipe = crtc->pipe;
2105         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2106         enum pipe pch_transcoder;
2107         i915_reg_t reg;
2108         u32 val;
2109 
2110         DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2111 
2112         assert_planes_disabled(dev_priv, pipe);
2113         assert_cursor_disabled(dev_priv, pipe);
2114         assert_sprites_disabled(dev_priv, pipe);
2115 
2116         if (HAS_PCH_LPT(dev_priv->dev))
2117                 pch_transcoder = TRANSCODER_A;
2118         else
2119                 pch_transcoder = pipe;
2120 
2121         /*
2122          * A pipe without a PLL won't actually be able to drive bits from
2123          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2124          * need the check.
2125          */
2126         if (HAS_GMCH_DISPLAY(dev_priv->dev))
2127                 if (crtc->config->has_dsi_encoder)
2128                         assert_dsi_pll_enabled(dev_priv);
2129                 else
2130                         assert_pll_enabled(dev_priv, pipe);
2131         else {
2132                 if (crtc->config->has_pch_encoder) {
2133                         /* if driving the PCH, we need FDI enabled */
2134                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2135                         assert_fdi_tx_pll_enabled(dev_priv,
2136                                                   (enum pipe) cpu_transcoder);
2137                 }
2138                 /* FIXME: assert CPU port conditions for SNB+ */
2139         }
2140 
2141         reg = PIPECONF(cpu_transcoder);
2142         val = I915_READ(reg);
2143         if (val & PIPECONF_ENABLE) {
2144                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2145                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2146                 return;
2147         }
2148 
2149         I915_WRITE(reg, val | PIPECONF_ENABLE);
2150         POSTING_READ(reg);
2151 
2152         /*
2153          * Until the pipe starts DSL will read as 0, which would cause
2154          * an apparent vblank timestamp jump, which messes up also the
2155          * frame count when it's derived from the timestamps. So let's
2156          * wait for the pipe to start properly before we call
2157          * drm_crtc_vblank_on()
2158          */
2159         if (dev->max_vblank_count == 0 &&
2160             wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
2161                 DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
2162 }
2163 
2164 /**
2165  * intel_disable_pipe - disable a pipe, asserting requirements
2166  * @crtc: crtc whose pipes is to be disabled
2167  *
2168  * Disable the pipe of @crtc, making sure that various hardware
2169  * specific requirements are met, if applicable, e.g. plane
2170  * disabled, panel fitter off, etc.
2171  *
2172  * Will wait until the pipe has shut down before returning.
2173  */
2174 static void intel_disable_pipe(struct intel_crtc *crtc)
2175 {
2176         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2177         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2178         enum pipe pipe = crtc->pipe;
2179         i915_reg_t reg;
2180         u32 val;
2181 
2182         DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2183 
2184         /*
2185          * Make sure planes won't keep trying to pump pixels to us,
2186          * or we might hang the display.
2187          */
2188         assert_planes_disabled(dev_priv, pipe);
2189         assert_cursor_disabled(dev_priv, pipe);
2190         assert_sprites_disabled(dev_priv, pipe);
2191 
2192         reg = PIPECONF(cpu_transcoder);
2193         val = I915_READ(reg);
2194         if ((val & PIPECONF_ENABLE) == 0)
2195                 return;
2196 
2197         /*
2198          * Double wide has implications for planes
2199          * so best keep it disabled when not needed.
2200          */
2201         if (crtc->config->double_wide)
2202                 val &= ~PIPECONF_DOUBLE_WIDE;
2203 
2204         /* Don't disable pipe or pipe PLLs if needed */
2205         if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2206             !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2207                 val &= ~PIPECONF_ENABLE;
2208 
2209         I915_WRITE(reg, val);
2210         if ((val & PIPECONF_ENABLE) == 0)
2211                 intel_wait_for_pipe_off(crtc);
2212 }
2213 
2214 static bool need_vtd_wa(struct drm_device *dev)
2215 {
2216 #ifdef CONFIG_INTEL_IOMMU
2217         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2218                 return true;
2219 #endif
2220         return false;
2221 }
2222 
/* Tile size in bytes: 2KiB on gen2, 4KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
2227 
2228 static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
2229                                      uint64_t fb_modifier, unsigned int cpp)
2230 {
2231         switch (fb_modifier) {
2232         case DRM_FORMAT_MOD_NONE:
2233                 return cpp;
2234         case I915_FORMAT_MOD_X_TILED:
2235                 if (IS_GEN2(dev_priv))
2236                         return 128;
2237                 else
2238                         return 512;
2239         case I915_FORMAT_MOD_Y_TILED:
2240                 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2241                         return 128;
2242                 else
2243                         return 512;
2244         case I915_FORMAT_MOD_Yf_TILED:
2245                 switch (cpp) {
2246                 case 1:
2247                         return 64;
2248                 case 2:
2249                 case 4:
2250                         return 128;
2251                 case 8:
2252                 case 16:
2253                         return 256;
2254                 default:
2255                         MISSING_CASE(cpp);
2256                         return cpp;
2257                 }
2258                 break;
2259         default:
2260                 MISSING_CASE(fb_modifier);
2261                 return cpp;
2262         }
2263 }
2264 
2265 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2266                                uint64_t fb_modifier, unsigned int cpp)
2267 {
2268         if (fb_modifier == DRM_FORMAT_MOD_NONE)
2269                 return 1;
2270         else
2271                 return intel_tile_size(dev_priv) /
2272                         intel_tile_width(dev_priv, fb_modifier, cpp);
2273 }
2274 
2275 unsigned int
2276 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2277                       uint32_t pixel_format, uint64_t fb_modifier)
2278 {
2279         unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2280         unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2281 
2282         return ALIGN(height, tile_height);
2283 }
2284 
/*
 * Fill a GGTT view description for @fb and @plane_state.  Defaults to
 * the normal view; switches to the rotated view (and computes the
 * rotation parameters, per-plane page counts and sizes) only when the
 * plane is rotated by 90/270 degrees.  NV12 gets a second set of
 * parameters for the UV plane.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	struct intel_rotation_info *info = &view->params.rotated;
	unsigned int tile_size, tile_width, tile_height, cpp;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return;

	if (!intel_rotation_90_or_270(plane_state->rotation))
		return;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->uv_offset = fb->offsets[1];
	info->fb_modifier = fb->modifier[0];

	tile_size = intel_tile_size(dev_priv);

	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
	tile_height = tile_size / tile_width;

	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
	info->size = info->width_pages * info->height_pages * tile_size;

	if (info->pixel_format == DRM_FORMAT_NV12) {
		/* UV plane is subsampled: half the height of the Y plane. */
		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
		tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
		tile_height = tile_size / tile_width;

		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
		info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
	}
}
2329 
2330 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2331 {
2332         if (INTEL_INFO(dev_priv)->gen >= 9)
2333                 return 256 * 1024;
2334         else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2335                  IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2336                 return 128 * 1024;
2337         else if (INTEL_INFO(dev_priv)->gen >= 4)
2338                 return 4 * 1024;
2339         else
2340                 return 0;
2341 }
2342 
2343 static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2344                                          uint64_t fb_modifier)
2345 {
2346         switch (fb_modifier) {
2347         case DRM_FORMAT_MOD_NONE:
2348                 return intel_linear_alignment(dev_priv);
2349         case I915_FORMAT_MOD_X_TILED:
2350                 if (INTEL_INFO(dev_priv)->gen >= 9)
2351                         return 256 * 1024;
2352                 return 0;
2353         case I915_FORMAT_MOD_Y_TILED:
2354         case I915_FORMAT_MOD_Yf_TILED:
2355                 return 1 * 1024 * 1024;
2356         default:
2357                 MISSING_CASE(fb_modifier);
2358                 return 0;
2359         }
2360 }
2361 
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer's backing object for
 * scanout and, for the normal GGTT view, install a fence.
 *
 * Picks the GGTT view (normal or rotated) from @plane_state via
 * intel_fill_fb_ggtt_view(), pins the object into the display plane with
 * the tiling/platform-required alignment, and pins a fence register when
 * the view is I915_GGTT_VIEW_NORMAL.  Caller must hold
 * dev->struct_mutex.
 *
 * Returns 0 on success or a negative error code; -EDEADLK from the
 * fence code is translated to -EBUSY (see comment below).
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Alignment depends on the fb's tiling modifier, not the object. */
	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2435 
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (only installed
 * for the normal GGTT view) and unpin the object from the display
 * plane.  Caller must hold struct_mutex.
 */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	/* Recompute the same view that was used when pinning. */
	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	/* Fences were only pinned for the normal (unrotated) view. */
	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2451 
2452 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2453  * is assumed to be a power-of-two. */
2454 u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
2455                               int *x, int *y,
2456                               uint64_t fb_modifier,
2457                               unsigned int cpp,
2458                               unsigned int pitch)
2459 {
2460         if (fb_modifier != DRM_FORMAT_MOD_NONE) {
2461                 unsigned int tile_size, tile_width, tile_height;
2462                 unsigned int tile_rows, tiles;
2463 
2464                 tile_size = intel_tile_size(dev_priv);
2465                 tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
2466                 tile_height = tile_size / tile_width;
2467 
2468                 tile_rows = *y / tile_height;
2469                 *y %= tile_height;
2470 
2471                 tiles = *x / (tile_width/cpp);
2472                 *x %= tile_width/cpp;
2473 
2474                 return tile_rows * pitch * tile_height + tiles * tile_size;
2475         } else {
2476                 unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
2477                 unsigned int offset;
2478 
2479                 offset = *y * pitch + *x * cpp;
2480                 *y = (offset & alignment) / pitch;
2481                 *x = ((offset & alignment) - *y * pitch) / cpp;
2482                 return offset & ~alignment;
2483         }
2484 }
2485 
2486 static int i9xx_format_to_fourcc(int format)
2487 {
2488         switch (format) {
2489         case DISPPLANE_8BPP:
2490                 return DRM_FORMAT_C8;
2491         case DISPPLANE_BGRX555:
2492                 return DRM_FORMAT_XRGB1555;
2493         case DISPPLANE_BGRX565:
2494                 return DRM_FORMAT_RGB565;
2495         default:
2496         case DISPPLANE_BGRX888:
2497                 return DRM_FORMAT_XRGB8888;
2498         case DISPPLANE_RGBX888:
2499                 return DRM_FORMAT_XBGR8888;
2500         case DISPPLANE_BGRX101010:
2501                 return DRM_FORMAT_XRGB2101010;
2502         case DISPPLANE_RGBX101010:
2503                 return DRM_FORMAT_XBGR2101010;
2504         }
2505 }
2506 
2507 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2508 {
2509         switch (format) {
2510         case PLANE_CTL_FORMAT_RGB_565:
2511                 return DRM_FORMAT_RGB565;
2512         default:
2513         case PLANE_CTL_FORMAT_XRGB_8888:
2514                 if (rgb_order) {
2515                         if (alpha)
2516                                 return DRM_FORMAT_ABGR8888;
2517                         else
2518                                 return DRM_FORMAT_XBGR8888;
2519                 } else {
2520                         if (alpha)
2521                                 return DRM_FORMAT_ARGB8888;
2522                         else
2523                                 return DRM_FORMAT_XRGB8888;
2524                 }
2525         case PLANE_CTL_FORMAT_XRGB_2101010:
2526                 if (rgb_order)
2527                         return DRM_FORMAT_XBGR2101010;
2528                 else
2529                         return DRM_FORMAT_XRGB2101010;
2530         }
2531 }
2532 
/*
 * Try to wrap the firmware/BIOS-programmed scanout buffer (described by
 * @plane_config) in a GEM object backed by preallocated stolen memory,
 * and initialize plane_config->fb around it.
 *
 * Returns true on success; false if the config is empty, the fb would
 * consume too much stolen space, or the stolen object / fb init fails.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Page-align the BIOS-programmed base and size. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	/* GTT and stolen offsets are identical here (1:1 mapped range). */
	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	/* Inherit the tiling the BIOS programmed. */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2595 
2596 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2597 static void
2598 update_state_fb(struct drm_plane *plane)
2599 {
2600         if (plane->fb == plane->state->fb)
2601                 return;
2602 
2603         if (plane->state->fb)
2604                 drm_framebuffer_unreference(plane->state->fb);
2605         plane->state->fb = plane->fb;
2606         if (plane->state->fb)
2607                 drm_framebuffer_reference(plane->state->fb);
2608 }
2609 
/*
 * Take over (or share) the BIOS-programmed framebuffer for this CRTC's
 * primary plane during driver load.
 *
 * First tries to reconstruct the BIOS fb from stolen memory; failing
 * that, looks for another active CRTC scanning out from the same GGTT
 * base so its fb can be shared.  If neither works, the primary plane is
 * disabled so display state stays consistent.  On success, the plane's
 * legacy and atomic state are both pointed at the fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Reconstruction failed; the intel_framebuffer was never
	 * initialized, so plain kfree() is the correct teardown. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Same GGTT offset means both pipes scan out the same
		 * buffer (e.g. cloned BIOS configuration). */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Scan out the full fb, unscaled, at the top-left of the pipe.
	 * src_* coordinates are 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	/* Mirror the src/dst rectangles into the intel plane state. */
	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	/* Keep the BIOS swizzle setup if the inherited fb is tiled. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2705 
/*
 * Program the pre-ILK (gen2-4 / VLV / CHV) primary plane registers from
 * the given CRTC and plane state.  Register write order matters: the
 * surface/address write last latches the update.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point; take the integer part. */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV's PRIMSIZE/PRIMPOS mirror the pre-gen4 plane
		 * size/position registers for plane B. */
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the fb's fourcc into the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Plane format was validated earlier in the atomic check. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a tile-aligned surface base plus a
		 * (x, y) tile offset; split the linear offset accordingly. */
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(dev_priv, &x, &y,
						  fb->modifier[0], cpp,
						  fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF write arms the update on gen4+. */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2819 
/*
 * Disable the primary plane by clearing its control register, then
 * writing the surface/address register to latch the change.
 */
static void i9xx_disable_primary_plane(struct drm_plane *primary,
				       struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;

	I915_WRITE(DSPCNTR(plane), 0);
	/* gen4+ latches on DSPSURF, earlier parts on DSPADDR. */
	if (INTEL_INFO(dev_priv)->gen >= 4)
		I915_WRITE(DSPSURF(plane), 0);
	else
		I915_WRITE(DSPADDR(plane), 0);
	POSTING_READ(DSPCNTR(plane));
}
2835 
/*
 * Program the ILK/SNB/IVB/HSW/BDW primary plane registers from the
 * given CRTC and plane state.  Register write order matters: DSPSURF
 * latches the update.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point; take the integer part. */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the fb's fourcc into the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Plane format was validated earlier in the atomic check. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Split the offset into a tile-aligned surface base plus the
	 * remaining (x, y) offset within the tile. */
	linear_offset = y * fb->pitches[0] + x * cpp;
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(dev_priv, &x, &y,
					  fb->modifier[0], cpp,
					  fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW handle the coordinate flip in hardware. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	/* DSPSURF write arms the plane update. */
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2925 
2926 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2927                               uint64_t fb_modifier, uint32_t pixel_format)
2928 {
2929         if (fb_modifier == DRM_FORMAT_MOD_NONE) {
2930                 return 64;
2931         } else {
2932                 int cpp = drm_format_plane_cpp(pixel_format, 0);
2933 
2934                 return intel_tile_width(dev_priv, fb_modifier, cpp);
2935         }
2936 }
2937 
/*
 * Look up the GGTT offset of @obj for the view currently used by
 * @intel_plane's state.  @plane selects the color plane: for plane 1
 * (the UV plane of an NV12 rotated view) the rotated view's
 * uv_start_page is added.
 *
 * NOTE(review): on a missing vma this returns -1 through a u32 return
 * type, i.e. the sentinel is 0xffffffff — callers presumably treat any
 * value as a raw offset; confirm none compare against a signed -1.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	/* Recompute the view (normal or rotated) from the plane state. */
	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display offsets must fit in 32 bits. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2965 
/* Disable (unbind) a single SKL pipe scaler by clearing its control,
 * window position and window size registers. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}
2975 
2976 /*
2977  * This function detaches (aka. unbinds) unused scalers in hardware
2978  */
2979 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2980 {
2981         struct intel_crtc_scaler_state *scaler_state;
2982         int i;
2983 
2984         scaler_state = &intel_crtc->config->scaler_state;
2985 
2986         /* loop through and disable scalers that aren't in use */
2987         for (i = 0; i < intel_crtc->num_scalers; i++) {
2988                 if (!scaler_state->scalers[i].in_use)
2989                         skl_detach_scaler(intel_crtc, i);
2990         }
2991 }
2992 
/*
 * Translate a DRM fourcc into the SKL+ PLANE_CTL format/order/alpha
 * bits.  Returns 0 (and logs via MISSING_CASE) for unsupported formats.
 */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3033 
3034 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3035 {
3036         switch (fb_modifier) {
3037         case DRM_FORMAT_MOD_NONE:
3038                 break;
3039         case I915_FORMAT_MOD_X_TILED:
3040                 return PLANE_CTL_TILED_X;
3041         case I915_FORMAT_MOD_Y_TILED:
3042                 return PLANE_CTL_TILED_Y;
3043         case I915_FORMAT_MOD_Yf_TILED:
3044                 return PLANE_CTL_TILED_YF;
3045         default:
3046                 MISSING_CASE(fb_modifier);
3047         }
3048 
3049         return 0;
3050 }
3051 
3052 u32 skl_plane_ctl_rotation(unsigned int rotation)
3053 {
3054         switch (rotation) {
3055         case BIT(DRM_ROTATE_0):
3056                 break;
3057         /*
3058          * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3059          * while i915 HW rotation is clockwise, thats why this swapping.
3060          */
3061         case BIT(DRM_ROTATE_90):
3062                 return PLANE_CTL_ROTATE_270;
3063         case BIT(DRM_ROTATE_180):
3064                 return PLANE_CTL_ROTATE_180;
3065         case BIT(DRM_ROTATE_270):
3066                 return PLANE_CTL_ROTATE_90;
3067         default:
3068                 MISSING_CASE(rotation);
3069         }
3070 
3071         return 0;
3072 }
3073 
/*
 * Program the SKL+ universal plane 0 (primary) from the given plane state:
 * control bits (format/tiling/rotation), offset, size and stride, plus an
 * optional pipe scaler.  The surface address is written last, followed by
 * a posting read to flush the writes to hardware.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src rectangle is in 16.16 fixed point, dst in integer pixels */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	/* stride register units depend on the fb tiling modifier */
	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* rotated view: x/y and width/height swap roles */
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		/* plane position is handled via the scaler window instead */
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* surface address goes last; NOTE(review): appears to arm the update */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3158 
3159 static void skylake_disable_primary_plane(struct drm_plane *primary,
3160                                           struct drm_crtc *crtc)
3161 {
3162         struct drm_device *dev = crtc->dev;
3163         struct drm_i915_private *dev_priv = dev->dev_private;
3164         int pipe = to_intel_crtc(crtc)->pipe;
3165 
3166         I915_WRITE(PLANE_CTL(pipe, 0), 0);
3167         I915_WRITE(PLANE_SURF(pipe, 0), 0);
3168         POSTING_READ(PLANE_SURF(pipe, 0));
3169 }
3170 
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	/* Intentionally a stub: always report the operation as unsupported. */
	return -ENODEV;
}
3181 
3182 static void intel_complete_page_flips(struct drm_device *dev)
3183 {
3184         struct drm_crtc *crtc;
3185 
3186         for_each_crtc(dev, crtc) {
3187                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3188                 enum plane plane = intel_crtc->plane;
3189 
3190                 intel_prepare_page_flip(dev, plane);
3191                 intel_finish_page_flip_plane(dev, plane);
3192         }
3193 }
3194 
/*
 * Re-write every CRTC's primary plane registers from its current atomic
 * state.  Called after a GPU reset so the hardware scans out of the
 * correct framebuffer again (CS-based flips may have been lost).
 */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state;

		/* per-crtc modeset lock keeps the plane state stable while we read it */
		drm_modeset_lock_crtc(crtc, &plane->base);
		plane_state = to_intel_plane_state(plane->base.state);

		/* only re-program planes that are actually visible */
		if (plane_state->visible)
			plane->update_plane(&plane->base,
					    to_intel_crtc_state(crtc->state),
					    plane_state);

		drm_modeset_unlock_crtc(crtc);
	}
}
3214 
/*
 * Prepare the display for a GPU reset.  Only pre-gen5 non-G4X hardware
 * needs this, since there the reset also clobbers the display, so the
 * pipes are shut down gracefully first.
 *
 * NOTE: drm_modeset_lock_all() is intentionally left held on return;
 * intel_finish_reset() drops it after the reset completes.
 */
void intel_prepare_reset(struct drm_device *dev)
{
	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		return;

	drm_modeset_lock_all(dev);
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	intel_display_suspend(dev);
}
3232 
/*
 * Undo intel_prepare_reset() after the GPU reset has finished: complete
 * the page flips the reset nuked, and on hardware where the reset also
 * clobbered the display, re-initialize the display side.  Drops the
 * modeset locks that intel_prepare_reset() took.
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	/* re-arm hotplug interrupt routing under the irq lock */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_hpd_init(dev_priv);

	/* pairs with drm_modeset_lock_all() in intel_prepare_reset() */
	drm_modeset_unlock_all(dev);
}
3283 
3284 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3285 {
3286         struct drm_device *dev = crtc->dev;
3287         struct drm_i915_private *dev_priv = dev->dev_private;
3288         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3289         bool pending;
3290 
3291         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3292             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3293                 return false;
3294 
3295         spin_lock_irq(&dev->event_lock);
3296         pending = to_intel_crtc(crtc)->unpin_work != NULL;
3297         spin_unlock_irq(&dev->event_lock);
3298 
3299         return pending;
3300 }
3301 
/*
 * Apply a pipe size change without a full modeset: update PIPESRC and
 * the panel fitter so a fastboot-style flip scans out the right size.
 * @old_crtc_state is the state being replaced; the new state is read
 * from crtc->base.state.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (HAS_DDI(dev))
		intel_set_pipe_csc(&crtc->base);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC is programmed with size minus one */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3346 
/*
 * Switch the FDI link from a training pattern to the normal (trained)
 * pattern on both the CPU TX side and the PCH RX side, then wait one
 * idle-pattern time for the link to settle.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	/* IVB uses a different train-none field than older gens */
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCH has its own train pattern field on the RX side */
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3388 
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock, i.e. training pattern 1 succeeded */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write the status bit back, presumably write-to-clear */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock, i.e. training pattern 2 succeeded */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3482 
/*
 * FDI TX voltage-swing / pre-emphasis levels tried in order during
 * SNB (and IVB manual) link training until lock is achieved.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3489 
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT PCH uses a dedicated train pattern field */
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* walk the vswing/pre-emphasis table until bit lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on success */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* same vswing walk, this time waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3622 
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each table entry is tried twice in a row */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* poll for bit lock (training pattern 1) */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* poll for symbol lock (training pattern 2) */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3742 
/*
 * Bring up the FDI PLLs for @intel_crtc's pipe: enable the PCH FDI RX
 * PLL (with the lane count and the BPC copied from PIPECONF), switch
 * the RX side from Rawclk to PCDclk, then enable the CPU FDI TX PLL
 * if it is not already on.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* keep FDI RX BPC consistent with PIPECONF */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3779 
/*
 * Shut down the FDI PLLs for @intel_crtc's pipe, reversing
 * ironlake_fdi_pll_enable(): switch RX back from PCDclk to Rawclk,
 * disable the CPU TX PLL, then the PCH RX PLL, with delays for the
 * clocks to settle.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3809 
/*
 * Disable the FDI link for @crtc's pipe: turn off CPU FDI TX and PCH
 * FDI RX, and leave both sides set to training pattern 1 so a later
 * re-train starts from a known state.  The PLLs are left running (see
 * ironlake_fdi_pll_disable() for those).
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3862 
/*
 * Check whether any CRTC still has framebuffer unpin work outstanding.
 * If a CRTC's flip has not completed yet, wait one vblank first to
 * give it a chance to finish.  Returns true on the first CRTC found
 * with a non-zero unpin work count.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}
3886 
/*
 * Finish a page flip on @intel_crtc: send the pending vblank event to
 * user space (if one was requested), drop the vblank reference, wake
 * anyone waiting in pending_flip_queue and queue the deferred unpin
 * work.  Called with dev->event_lock held (see
 * intel_crtc_wait_for_pending_flips()).
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	/* the actual unpin runs later from the driver workqueue */
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
3909 
3910 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3911 {
3912         struct drm_device *dev = crtc->dev;
3913         struct drm_i915_private *dev_priv = dev->dev_private;
3914         long ret;
3915 
3916         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3917 
3918         ret = wait_event_interruptible_timeout(
3919                                         dev_priv->pending_flip_queue,
3920                                         !intel_crtc_has_pending_flip(crtc),
3921                                         60*HZ);
3922 
3923         if (ret < 0)
3924                 return ret;
3925 
3926         if (ret == 0) {
3927                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3928 
3929                 spin_lock_irq(&dev->event_lock);
3930                 if (intel_crtc->unpin_work) {
3931                         WARN_ONCE(1, "Removing stuck page flip\n");
3932                         page_flip_completed(intel_crtc);
3933                 }
3934                 spin_unlock_irq(&dev->event_lock);
3935         }
3936 
3937         return 0;
3938 }
3939 
/* Gate the pixel clock and stop the iCLKIP modulator via sideband. */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	/* Gate the pixel clock before touching SSCCTL6. */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	/* Set the disable bit in SSCCTL6 through the sideband interface. */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
3954 
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Disable the modulator first; it is re-enabled below once the
	 * divisors have been programmed. */
	lpt_disable_iclkip(dev_priv);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		/* Split the overall divisor into an integer part and a
		 * phase-increment remainder within the PI range. */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Ungate the pixel clock now that the modulator is running again. */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4034 
/*
 * Copy the CPU transcoder timing registers (H/V total, blank, sync and
 * vsync shift) into the matching PCH transcoder registers.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4058 
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * Returns early if the bit already matches @enable.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* The bit may only be flipped while both FDI RX B and C are off. */
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4079 
4080 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4081 {
4082         struct drm_device *dev = intel_crtc->base.dev;
4083 
4084         switch (intel_crtc->pipe) {
4085         case PIPE_A:
4086                 break;
4087         case PIPE_B:
4088                 if (intel_crtc->config->fdi_lanes > 2)
4089                         cpt_set_fdi_bc_bifurcation(dev, false);
4090                 else
4091                         cpt_set_fdi_bc_bifurcation(dev, true);
4092 
4093                 break;
4094         case PIPE_C:
4095                 cpt_set_fdi_bc_bifurcation(dev, true);
4096 
4097                 break;
4098         default:
4099                 BUG();
4100         }
4101 }
4102 
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	/* Use the first DP or eDP encoder attached to this crtc. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/* NOTE(review): -1 is not a named enum port constant; the caller in
	 * this file (ironlake_pch_enable) BUG()s on it via its switch
	 * default -- confirm no other caller relies on this value. */
	return -1;
}
4118 
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB may need the FDI B/C lane split updated before training. */
	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		/* Route shared DPLL A or B to this pipe's transcoder. */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	/* Switch FDI from the training pattern to normal operation. */
	intel_fdi_normal_train(crtc);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		/* Propagate the sync polarities from the adjusted mode. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to the DP port driving this crtc. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4223 
/*
 * Enable the PCH side on LPT: program the iCLKIP clock, copy the CPU
 * transcoder timings across and enable the PCH transcoder. The PCH
 * transcoder used here is always A (see the assert and PIPE_A below).
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4240 
4241 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4242                                                 struct intel_crtc_state *crtc_state)
4243 {
4244         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4245         struct intel_shared_dpll *pll;
4246         struct intel_shared_dpll_config *shared_dpll;
4247         enum intel_dpll_id i;
4248         int max = dev_priv->num_shared_dpll;
4249 
4250         shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4251 
4252         if (HAS_PCH_IBX(dev_priv->dev)) {
4253                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4254                 i = (enum intel_dpll_id) crtc->pipe;
4255                 pll = &dev_priv->shared_dplls[i];
4256 
4257                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4258                               crtc->base.base.id, pll->name);
4259 
4260                 WARN_ON(shared_dpll[i].crtc_mask);
4261 
4262                 goto found;
4263         }
4264 
4265         if (IS_BROXTON(dev_priv->dev)) {
4266                 /* PLL is attached to port in bxt */
4267                 struct intel_encoder *encoder;
4268                 struct intel_digital_port *intel_dig_port;
4269 
4270                 encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4271                 if (WARN_ON(!encoder))
4272                         return NULL;
4273 
4274                 intel_dig_port = enc_to_dig_port(&encoder->base);
4275                 /* 1:1 mapping between ports and PLLs */
4276                 i = (enum intel_dpll_id)intel_dig_port->port;
4277                 pll = &dev_priv->shared_dplls[i];
4278                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4279                         crtc->base.base.id, pll->name);
4280                 WARN_ON(shared_dpll[i].crtc_mask);
4281 
4282                 goto found;
4283         } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4284                 /* Do not consider SPLL */
4285                 max = 2;
4286 
4287         for (i = 0; i < max; i++) {
4288                 pll = &dev_priv->shared_dplls[i];
4289 
4290                 /* Only want to check enabled timings first */
4291                 if (shared_dpll[i].crtc_mask == 0)
4292                         continue;
4293 
4294                 if (memcmp(&crtc_state->dpll_hw_state,
4295                            &shared_dpll[i].hw_state,
4296                            sizeof(crtc_state->dpll_hw_state)) == 0) {
4297                         DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4298                                       crtc->base.base.id, pll->name,
4299                                       shared_dpll[i].crtc_mask,
4300                                       pll->active);
4301                         goto found;
4302                 }
4303         }
4304 
4305         /* Ok no matching timings, maybe there's a free one? */
4306         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4307                 pll = &dev_priv->shared_dplls[i];
4308                 if (shared_dpll[i].crtc_mask == 0) {
4309                         DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4310                                       crtc->base.base.id, pll->name);
4311                         goto found;
4312                 }
4313         }
4314 
4315         return NULL;
4316 
4317 found:
4318         if (shared_dpll[i].crtc_mask == 0)
4319                 shared_dpll[i].hw_state =
4320                         crtc_state->dpll_hw_state;
4321 
4322         crtc_state->shared_dpll = i;
4323         DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4324                          pipe_name(crtc->pipe));
4325 
4326         shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
4327 
4328         return pll;
4329 }
4330 
4331 static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4332 {
4333         struct drm_i915_private *dev_priv = to_i915(state->dev);
4334         struct intel_shared_dpll_config *shared_dpll;
4335         struct intel_shared_dpll *pll;
4336         enum intel_dpll_id i;
4337 
4338         if (!to_intel_atomic_state(state)->dpll_set)
4339                 return;
4340 
4341         shared_dpll = to_intel_atomic_state(state)->shared_dpll;
4342         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4343                 pll = &dev_priv->shared_dplls[i];
4344                 pll->config = shared_dpll[i];
4345         }
4346 }
4347 
/*
 * After a modeset, verify the pipe is actually running by checking that
 * the scanline counter (PIPEDSL) advances; complain if it stays stuck
 * across two 5ms waits.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4361 
4362 static int
4363 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4364                   unsigned scaler_user, int *scaler_id, unsigned int rotation,
4365                   int src_w, int src_h, int dst_w, int dst_h)
4366 {
4367         struct intel_crtc_scaler_state *scaler_state =
4368                 &crtc_state->scaler_state;
4369         struct intel_crtc *intel_crtc =
4370                 to_intel_crtc(crtc_state->base.crtc);
4371         int need_scaling;
4372 
4373         need_scaling = intel_rotation_90_or_270(rotation) ?
4374                 (src_h != dst_w || src_w != dst_h):
4375                 (src_w != dst_w || src_h != dst_h);
4376 
4377         /*
4378          * if plane is being disabled or scaler is no more required or force detach
4379          *  - free scaler binded to this plane/crtc
4380          *  - in order to do this, update crtc->scaler_usage
4381          *
4382          * Here scaler state in crtc_state is set free so that
4383          * scaler can be assigned to other user. Actual register
4384          * update to free the scaler is done in plane/panel-fit programming.
4385          * For this purpose crtc/plane_state->scaler_id isn't reset here.
4386          */
4387         if (force_detach || !need_scaling) {
4388                 if (*scaler_id >= 0) {
4389                         scaler_state->scaler_users &= ~(1 << scaler_user);
4390                         scaler_state->scalers[*scaler_id].in_use = 0;
4391 
4392                         DRM_DEBUG_KMS("scaler_user index %u.%u: "
4393                                 "Staged freeing scaler id %d scaler_users = 0x%x\n",
4394                                 intel_crtc->pipe, scaler_user, *scaler_id,
4395                                 scaler_state->scaler_users);
4396                         *scaler_id = -1;
4397                 }
4398                 return 0;
4399         }
4400 
4401         /* range checks */
4402         if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4403                 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4404 
4405                 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4406                 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4407                 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4408                         "size is out of scaler range\n",
4409                         intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4410                 return -EINVAL;
4411         }
4412 
4413         /* mark this plane as a scaler user in crtc_state */
4414         scaler_state->scaler_users |= (1 << scaler_user);
4415         DRM_DEBUG_KMS("scaler_user index %u.%u: "
4416                 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4417                 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4418                 scaler_state->scaler_users);
4419 
4420         return 0;
4421 }
4422 
4423 /**
4424  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4425  *
4426  * @state: crtc's scaler state
4427  *
4428  * Return
4429  *     0 - scaler_usage updated successfully
4430  *    error - requested scaling cannot be supported or other error condition
4431  */
4432 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4433 {
4434         struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4435         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4436 
4437         DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4438                       intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4439 
4440         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4441                 &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4442                 state->pipe_src_w, state->pipe_src_h,
4443                 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4444 }
4445 
4446 /**
4447  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4448  *
4449  * @state: crtc's scaler state
4450  * @plane_state: atomic plane state to update
4451  *
4452  * Return
4453  *     0 - scaler_usage updated successfully
4454  *    error - requested scaling cannot be supported or other error condition
4455  */
4456 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4457                                    struct intel_plane_state *plane_state)
4458 {
4459 
4460         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4461         struct intel_plane *intel_plane =
4462                 to_intel_plane(plane_state->base.plane);
4463         struct drm_framebuffer *fb = plane_state->base.fb;
4464         int ret;
4465 
4466         bool force_detach = !fb || !plane_state->visible;
4467 
4468         DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4469                       intel_plane->base.base.id, intel_crtc->pipe,
4470                       drm_plane_index(&intel_plane->base));
4471 
4472         ret = skl_update_scaler(crtc_state, force_detach,
4473                                 drm_plane_index(&intel_plane->base),
4474                                 &plane_state->scaler_id,
4475                                 plane_state->base.rotation,
4476                                 drm_rect_width(&plane_state->src) >> 16,
4477                                 drm_rect_height(&plane_state->src) >> 16,
4478                                 drm_rect_width(&plane_state->dst),
4479                                 drm_rect_height(&plane_state->dst));
4480 
4481         if (ret || plane_state->scaler_id < 0)
4482                 return ret;
4483 
4484         /* check colorkey */
4485         if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4486                 DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4487                               intel_plane->base.base.id);
4488                 return -EINVAL;
4489         }
4490 
4491         /* Check src format */
4492         switch (fb->pixel_format) {
4493         case DRM_FORMAT_RGB565:
4494         case DRM_FORMAT_XBGR8888:
4495         case DRM_FORMAT_XRGB8888:
4496         case DRM_FORMAT_ABGR8888:
4497         case DRM_FORMAT_ARGB8888:
4498         case DRM_FORMAT_XRGB2101010:
4499         case DRM_FORMAT_XBGR2101010:
4500         case DRM_FORMAT_YUYV:
4501         case DRM_FORMAT_YVYU:
4502         case DRM_FORMAT_UYVY:
4503         case DRM_FORMAT_VYUY:
4504                 break;
4505         default:
4506                 DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4507                         intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4508                 return -EINVAL;
4509         }
4510 
4511         return 0;
4512 }
4513 
4514 static void skylake_scaler_disable(struct intel_crtc *crtc)
4515 {
4516         int i;
4517 
4518         for (i = 0; i < crtc->num_scalers; i++)
4519                 skl_detach_scaler(crtc, i);
4520 }
4521 
/*
 * Enable the SKL panel fitter using the scaler previously reserved in
 * crtc->config->scaler_state. No-op unless pch_pfit is enabled.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* pfit requires a scaler staged earlier (scaler_id >= 0). */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		/* Program the scaler control, window position and size. */
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4549 
/*
 * Enable the ILK-style panel fitter for @crtc if its config requires it,
 * programming control, window position and window size.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4570 
/*
 * Enable IPS if this crtc's config asks for it. A plane must already be
 * enabled; a vblank is awaited first. On BDW the toggle goes through the
 * pcode mailbox, elsewhere through the IPS_CTL register.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4603 
/*
 * Disable IPS if this crtc's config has it enabled. On BDW the toggle
 * goes through the pcode mailbox (waiting up to 42ms for completion),
 * elsewhere IPS_CTL is cleared directly. Ends with a vblank wait so the
 * caller may safely disable the plane afterwards.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4628 
/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	/* On GMCH platforms the relevant PLL (DSI or pipe) must be running. */
	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write the 256-entry LUT as packed R(23:16)/G(15:8)/B(7:0) words. */
	for (i = 0; i < 256; i++) {
		i915_reg_t palreg;

		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	/* Restore IPS if the workaround above turned it off. */
	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4677 
4678 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4679 {
4680         if (intel_crtc->overlay) {
4681                 struct drm_device *dev = intel_crtc->base.dev;
4682                 struct drm_i915_private *dev_priv = dev->dev_private;
4683 
4684                 mutex_lock(&dev->struct_mutex);
4685                 dev_priv->mm.interruptible = false;
4686                 (void) intel_overlay_switch_off(intel_crtc->overlay);
4687                 dev_priv->mm.interruptible = true;
4688                 mutex_unlock(&dev->struct_mutex);
4689         }
4690 
4691         /* Let userspace switch the overlay on again. In most cases userspace
4692          * has to recompute where to put it anyway.
4693          */
4694 }
4695 
4696 /**
4697  * intel_post_enable_primary - Perform operations after enabling primary plane
4698  * @crtc: the CRTC whose primary plane was just enabled
4699  *
4700  * Performs potentially sleeping operations that must be done after the primary
4701  * plane is enabled, such as updating FBC and IPS.  Note that this may be
4702  * called due to an explicit primary plane update, or due to an implicit
4703  * re-enable that is caused when a sprite plane is updated to no longer
4704  * completely hide the primary plane.
4705  */
4706 static void
4707 intel_post_enable_primary(struct drm_crtc *crtc)
4708 {
4709         struct drm_device *dev = crtc->dev;
4710         struct drm_i915_private *dev_priv = dev->dev_private;
4711         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4712         int pipe = intel_crtc->pipe;
4713 
4714         /*
4715          * FIXME IPS should be fine as long as one plane is
4716          * enabled, but in practice it seems to have problems
4717          * when going from primary only to sprite only and vice
4718          * versa.
4719          */
4720         hsw_enable_ips(intel_crtc);
4721 
4722         /*
4723          * Gen2 reports pipe underruns whenever all planes are disabled.
4724          * So don't enable underrun reporting before at least some planes
4725          * are enabled.
4726          * FIXME: Need to fix the logic to work when we turn off all planes
4727          * but leave the pipe running.
4728          */
4729         if (IS_GEN2(dev))
4730                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4731 
4732         /* Underruns don't always raise interrupts, so check manually. */
4733         intel_check_cpu_fifo_underruns(dev_priv);
4734         intel_check_pch_fifo_underruns(dev_priv);
4735 }
4736 
4737 /**
4738  * intel_pre_disable_primary - Perform operations before disabling primary plane
4739  * @crtc: the CRTC whose primary plane is to be disabled
4740  *
4741  * Performs potentially sleeping operations that must be done before the
4742  * primary plane is disabled, such as updating FBC and IPS.  Note that this may
4743  * be called due to an explicit primary plane update, or due to an implicit
4744  * disable that is caused when a sprite plane completely hides the primary
4745  * plane.
4746  */
4747 static void
4748 intel_pre_disable_primary(struct drm_crtc *crtc)
4749 {
4750         struct drm_device *dev = crtc->dev;
4751         struct drm_i915_private *dev_priv = dev->dev_private;
4752         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4753         int pipe = intel_crtc->pipe;
4754 
4755         /*
4756          * Gen2 reports pipe underruns whenever all planes are disabled.
4757          * So diasble underrun reporting before all the planes get disabled.
4758          * FIXME: Need to fix the logic to work when we turn off all planes
4759          * but leave the pipe running.
4760          */
4761         if (IS_GEN2(dev))
4762                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4763 
4764         /*
4765          * Vblank time updates from the shadow to live plane control register
4766          * are blocked if the memory self-refresh mode is active at that
4767          * moment. So to make sure the plane gets truly disabled, disable
4768          * first the self-refresh mode. The self-refresh enable bit in turn
4769          * will be checked/applied by the HW only at the next frame start
4770          * event which is after the vblank start event, so we need to have a
4771          * wait-for-vblank between disabling the plane and the pipe.
4772          */
4773         if (HAS_GMCH_DISPLAY(dev)) {
4774                 intel_set_memory_cxsr(dev_priv, false);
4775                 dev_priv->wm.vlv.cxsr = false;
4776                 intel_wait_for_vblank(dev, pipe);
4777         }
4778 
4779         /*
4780          * FIXME IPS should be fine as long as one plane is
4781          * enabled, but in practice it seems to have problems
4782          * when going from primary only to sprite only and vice
4783          * versa.
4784          */
4785         hsw_disable_ips(intel_crtc);
4786 }
4787 
4788 static void intel_post_plane_update(struct intel_crtc *crtc)
4789 {
4790         struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4791         struct intel_crtc_state *pipe_config =
4792                 to_intel_crtc_state(crtc->base.state);
4793         struct drm_device *dev = crtc->base.dev;
4794 
4795         intel_frontbuffer_flip(dev, atomic->fb_bits);
4796 
4797         crtc->wm.cxsr_allowed = true;
4798 
4799         if (pipe_config->wm_changed && pipe_config->base.active)
4800                 intel_update_watermarks(&crtc->base);
4801 
4802         if (atomic->update_fbc)
4803                 intel_fbc_post_update(crtc);
4804 
4805         if (atomic->post_enable_primary)
4806                 intel_post_enable_primary(&crtc->base);
4807 
4808         memset(atomic, 0, sizeof(*atomic));
4809 }
4810 
4811 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4812 {
4813         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4814         struct drm_device *dev = crtc->base.dev;
4815         struct drm_i915_private *dev_priv = dev->dev_private;
4816         struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4817         struct intel_crtc_state *pipe_config =
4818                 to_intel_crtc_state(crtc->base.state);
4819         struct drm_atomic_state *old_state = old_crtc_state->base.state;
4820         struct drm_plane *primary = crtc->base.primary;
4821         struct drm_plane_state *old_pri_state =
4822                 drm_atomic_get_existing_plane_state(old_state, primary);
4823         bool modeset = needs_modeset(&pipe_config->base);
4824 
4825         if (atomic->update_fbc)
4826                 intel_fbc_pre_update(crtc);
4827 
4828         if (old_pri_state) {
4829                 struct intel_plane_state *primary_state =
4830                         to_intel_plane_state(primary->state);
4831                 struct intel_plane_state *old_primary_state =
4832                         to_intel_plane_state(old_pri_state);
4833 
4834                 if (old_primary_state->visible &&
4835                     (modeset || !primary_state->visible))
4836                         intel_pre_disable_primary(&crtc->base);
4837         }
4838 
4839         if (pipe_config->disable_cxsr) {
4840                 crtc->wm.cxsr_allowed = false;
4841 
4842                 if (old_crtc_state->base.active)
4843                         intel_set_memory_cxsr(dev_priv, false);
4844         }
4845 
4846         if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
4847                 intel_update_watermarks(&crtc->base);
4848 }
4849 
4850 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4851 {
4852         struct drm_device *dev = crtc->dev;
4853         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4854         struct drm_plane *p;
4855         int pipe = intel_crtc->pipe;
4856 
4857         intel_crtc_dpms_overlay_disable(intel_crtc);
4858 
4859         drm_for_each_plane_mask(p, dev, plane_mask)
4860                 to_intel_plane(p)->disable_plane(p, crtc);
4861 
4862         /*
4863          * FIXME: Once we grow proper nuclear flip support out of this we need
4864          * to compute the mask of flip planes precisely. For the time being
4865          * consider this a flip to a NULL plane.
4866          */
4867         intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4868 }
4869 
/*
 * ironlake_crtc_enable - full pipe enable sequence for ILK-style hardware
 * @crtc: crtc to bring up
 *
 * Order matters throughout this sequence: timings and M/N values are
 * programmed before the encoders' pre_enable hooks, the FDI PLL comes
 * up before the CPU pipe, and the PCH side is enabled only after the
 * pipe is running.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	/* Suppress PCH underrun reports until the link is fully trained. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4945 
4946 /* IPS only exists on ULT machines and is tied to pipe A. */
4947 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4948 {
4949         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4950 }
4951 
/*
 * haswell_crtc_enable - full pipe enable sequence for HSW/BDW/SKL+
 * @crtc: crtc to bring up
 *
 * The programming order is hardware mandated: timings/M/N before the
 * encoders' pre_enable hooks, FDI training and pipe clock before the
 * pfit/LUT, and the transcoder function before the pipe itself. PCH
 * underrun reporting is suppressed until the sequence has settled.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	/* LPT has a single PCH transcoder, hence TRANSCODER_A here. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* Wait two vblanks before re-arming underrun reporting to avoid
	 * spurious reports right after enabling. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
5057 
5058 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5059 {
5060         struct drm_device *dev = crtc->base.dev;
5061         struct drm_i915_private *dev_priv = dev->dev_private;
5062         int pipe = crtc->pipe;
5063 
5064         /* To avoid upsetting the power well on haswell only disable the pfit if
5065          * it's in use. The hw state code will make sure we get this right. */
5066         if (force || crtc->config->pch_pfit.enabled) {
5067                 I915_WRITE(PF_CTL(pipe), 0);
5068                 I915_WRITE(PF_WIN_POS(pipe), 0);
5069                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5070         }
5071 }
5072 
/*
 * ironlake_crtc_disable - full pipe disable sequence for ILK-style hardware
 * @crtc: crtc to shut down
 *
 * Tears down in reverse enable order: encoders first, then vblanks, the
 * CPU pipe, pfit, FDI, the encoders' post_disable hooks and finally the
 * PCH transcoder and FDI PLL. Underrun reporting is suppressed around
 * the steps known to raise spurious reports.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_fdi_disable(crtc);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5137 
/*
 * haswell_crtc_disable - full pipe disable sequence for HSW/BDW/SKL+
 * @crtc: crtc to shut down
 *
 * Mirrors haswell_crtc_enable() in reverse: encoders, vblanks, pipe,
 * transcoder function, scaler/pfit, pipe clock, post_disable hooks and
 * finally the PCH transcoder/iCLKIP/FDI when a PCH encoder is present.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* LPT has a single PCH transcoder, hence TRANSCODER_A here. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		lpt_disable_iclkip(dev_priv);
		intel_ddi_fdi_disable(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}
}
5187 
5188 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5189 {
5190         struct drm_device *dev = crtc->base.dev;
5191         struct drm_i915_private *dev_priv = dev->dev_private;
5192         struct intel_crtc_state *pipe_config = crtc->config;
5193 
5194         if (!pipe_config->gmch_pfit.control)
5195                 return;
5196 
5197         /*
5198          * The panel fitter should only be adjusted whilst the pipe is disabled,
5199          * according to register description and PRM.
5200          */
5201         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5202         assert_pipe_disabled(dev_priv, crtc->pipe);
5203 
5204         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5205         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5206 
5207         /* Border color in case we don't scale up to the full screen. Black by
5208          * default, change to something else for debugging. */
5209         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5210 }
5211 
5212 static enum intel_display_power_domain port_to_power_domain(enum port port)
5213 {
5214         switch (port) {
5215         case PORT_A:
5216                 return POWER_DOMAIN_PORT_DDI_A_LANES;
5217         case PORT_B:
5218                 return POWER_DOMAIN_PORT_DDI_B_LANES;
5219         case PORT_C:
5220                 return POWER_DOMAIN_PORT_DDI_C_LANES;
5221         case PORT_D:
5222                 return POWER_DOMAIN_PORT_DDI_D_LANES;
5223         case PORT_E:
5224                 return POWER_DOMAIN_PORT_DDI_E_LANES;
5225         default:
5226                 MISSING_CASE(port);
5227                 return POWER_DOMAIN_PORT_OTHER;
5228         }
5229 }
5230 
5231 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5232 {
5233         switch (port) {
5234         case PORT_A:
5235                 return POWER_DOMAIN_AUX_A;
5236         case PORT_B:
5237                 return POWER_DOMAIN_AUX_B;
5238         case PORT_C:
5239                 return POWER_DOMAIN_AUX_C;
5240         case PORT_D:
5241                 return POWER_DOMAIN_AUX_D;
5242         case PORT_E:
5243                 /* FIXME: Check VBT for actual wiring of PORT E */
5244                 return POWER_DOMAIN_AUX_D;
5245         default:
5246                 MISSING_CASE(port);
5247                 return POWER_DOMAIN_AUX_A;
5248         }
5249 }
5250 
/*
 * intel_display_port_power_domain - power domain needed to drive an encoder
 * @intel_encoder: encoder to look up
 *
 * Maps the encoder's output type (and, for digital outputs, its port)
 * to the display power domain that must be held while driving it.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through - treat UNKNOWN like the other digital ports */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5277 
/*
 * intel_display_port_aux_power_domain - power domain for an encoder's AUX ch
 * @intel_encoder: encoder to look up
 *
 * Like intel_display_port_power_domain(), but returns the AUX-channel
 * power domain used for DP AUX transactions on the encoder's port.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5307 
5308 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5309                                             struct intel_crtc_state *crtc_state)
5310 {
5311         struct drm_device *dev = crtc->dev;
5312         struct drm_encoder *encoder;
5313         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5314         enum pipe pipe = intel_crtc->pipe;
5315         unsigned long mask;
5316         enum transcoder transcoder = crtc_state->cpu_transcoder;
5317 
5318         if (!crtc_state->base.active)
5319                 return 0;
5320 
5321         mask = BIT(POWER_DOMAIN_PIPE(pipe));
5322         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5323         if (crtc_state->pch_pfit.enabled ||
5324             crtc_state->pch_pfit.force_thru)
5325                 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5326 
5327         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5328                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5329 
5330                 mask |= BIT(intel_display_port_power_domain(intel_encoder));
5331         }
5332 
5333         return mask;
5334 }
5335 
5336 static unsigned long
5337 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5338                                struct intel_crtc_state *crtc_state)
5339 {
5340         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5341         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5342         enum intel_display_power_domain domain;
5343         unsigned long domains, new_domains, old_domains;
5344 
5345         old_domains = intel_crtc->enabled_power_domains;
5346         intel_crtc->enabled_power_domains = new_domains =
5347                 get_crtc_power_domains(crtc, crtc_state);
5348 
5349         domains = new_domains & ~old_domains;
5350 
5351         for_each_power_domain(domain, domains)
5352                 intel_display_power_get(dev_priv, domain);
5353 
5354         return old_domains & ~new_domains;
5355 }
5356 
/* Drop one reference on each power domain set in @domains (the mask
 * previously returned by modeset_get_crtc_power_domains()). */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
5365 
5366 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5367 {
5368         int max_cdclk_freq = dev_priv->max_cdclk_freq;
5369 
5370         if (INTEL_INFO(dev_priv)->gen >= 9 ||
5371             IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5372                 return max_cdclk_freq;
5373         else if (IS_CHERRYVIEW(dev_priv))
5374                 return max_cdclk_freq*95/100;
5375         else if (INTEL_INFO(dev_priv)->gen < 4)
5376                 return 2*max_cdclk_freq*90/100;
5377         else
5378                 return max_cdclk_freq*90/100;
5379 }
5380 
/*
 * intel_update_max_cdclk - determine and cache the platform's max cdclk
 * @dev: drm device
 *
 * Reads fuse/strap registers where the limit is configurable (SKL/KBL,
 * BDW) and falls back to fixed per-platform values otherwise. Also
 * derives the resulting maximum dot clock.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* The limit is fused into the display fuse straps. */
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5428 
5429 static void intel_update_cdclk(struct drm_device *dev)
5430 {
5431         struct drm_i915_private *dev_priv = dev->dev_private;
5432 
5433         dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5434         DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5435                          dev_priv->cdclk_freq);
5436 
5437         /*
5438          * Program the gmbus_freq based on the cdclk frequency.
5439          * BSpec erroneously claims we should aim for 4MHz, but
5440          * in fact 1MHz is the correct frequency.
5441          */
5442         if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5443                 /*
5444                  * Program the gmbus_freq based on the cdclk frequency.
5445                  * BSpec erroneously claims we should aim for 4MHz, but
5446                  * in fact 1MHz is the correct frequency.
5447                  */
5448                 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5449         }
5450 
5451         if (dev_priv->max_cdclk_freq == 0)
5452                 intel_update_max_cdclk(dev);
5453 }
5454 
5455 static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5456 {
5457         struct drm_i915_private *dev_priv = dev->dev_private;
5458         uint32_t divider;
5459         uint32_t ratio;
5460         uint32_t current_freq;
5461         int ret;
5462 
5463         /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5464         switch (frequency) {
5465         case 144000:
5466                 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5467                 ratio = BXT_DE_PLL_RATIO(60);
5468                 break;
5469         case 288000:
5470                 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5471                 ratio = BXT_DE_PLL_RATIO(60);
5472                 break;
5473         case 384000:
5474                 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5475                 ratio = BXT_DE_PLL_RATIO(60);
5476                 break;
5477         case 576000:
5478                 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5479                 ratio = BXT_DE_PLL_RATIO(60);
5480                 break;
5481         case 624000:
5482                 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5483                 ratio = BXT_DE_PLL_RATIO(65);
5484                 break;
5485         case 19200:
5486                 /*
5487                  * Bypass frequency with DE PLL disabled. Init ratio, divider
5488                  * to suppress GCC warning.
5489                  */
5490                 ratio = 0;
5491                 divider = 0;
5492                 break;
5493         default:
5494                 DRM_ERROR("unsupported CDCLK freq %d", frequency);
5495 
5496                 return;
5497         }
5498 
5499         mutex_lock(&dev_priv->rps.hw_lock);
5500         /* Inform power controller of upcoming frequency change */
5501         ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5502                                       0x80000000);
5503         mutex_unlock(&dev_priv->rps.hw_lock);
5504 
5505         if (ret) {
5506                 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5507                           ret, frequency);
5508                 return;
5509         }
5510 
5511         current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5512         /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5513         current_freq = current_freq * 500 + 1000;
5514 
5515         /*
5516          * DE PLL has to be disabled when
5517          * - setting to 19.2MHz (bypass, PLL isn't used)
5518          * - before setting to 624MHz (PLL needs toggling)
5519          * - before setting to any frequency from 624MHz (PLL needs toggling)
5520          */
5521         if (frequency == 19200 || frequency == 624000 ||
5522             current_freq == 624000) {
5523                 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5524                 /* Timeout 200us */
5525                 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5526                              1))
5527                         DRM_ERROR("timout waiting for DE PLL unlock\n");
5528         }
5529 
5530         if (frequency != 19200) {
5531                 uint32_t val;
5532 
5533                 val = I915_READ(BXT_DE_PLL_CTL);
5534                 val &= ~BXT_DE_PLL_RATIO_MASK;
5535                 val |= ratio;
5536                 I915_WRITE(BXT_DE_PLL_CTL, val);
5537 
5538                 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5539                 /* Timeout 200us */
5540                 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5541                         DRM_ERROR("timeout waiting for DE PLL lock\n");
5542 
5543                 val = I915_READ(CDCLK_CTL);
5544                 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5545                 val |= divider;
5546                 /*
5547                  * Disable SSA Precharge when CD clock frequency < 500 MHz,
5548                  * enable otherwise.
5549                  */
5550                 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5551                 if (frequency >= 500000)
5552                         val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5553 
5554                 val &= ~CDCLK_FREQ_DECIMAL_MASK;
5555                 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5556                 val |= (frequency - 1000) / 500;
5557                 I915_WRITE(CDCLK_CTL, val);
5558         }
5559 
5560         mutex_lock(&dev_priv->rps.hw_lock);
5561         ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5562                                       DIV_ROUND_UP(frequency, 25000));
5563         mutex_unlock(&dev_priv->rps.hw_lock);
5564 
5565         if (ret) {
5566                 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5567                           ret, frequency);
5568                 return;
5569         }
5570 
5571         intel_update_cdclk(dev);
5572 }
5573 
/*
 * One-time BXT display clock bring-up: disable the PCH reset handshake
 * (there is no PCH on BXT), take the PLL power domain, and unless the
 * pre-os already enabled the DE PLL, program CDCLK and power up the
 * DBUF.  Paired with broxton_uninit_cdclk().
 */
void broxton_init_cdclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t val;

        /*
         * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
         * or else the reset will hang because there is no PCH to respond.
         * Move the handshake programming to initialization sequence.
         * Previously was left up to BIOS.
         */
        val = I915_READ(HSW_NDE_RSTWRN_OPT);
        val &= ~RESET_PCH_HANDSHAKE_ENABLE;
        I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

        /* Enable PG1 for cdclk */
        intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

        /* check if cd clock is enabled */
        if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
                DRM_DEBUG_KMS("Display already initialized\n");
                /*
                 * NOTE: returns with the POWER_DOMAIN_PLLS reference taken
                 * above still held; broxton_uninit_cdclk() releases it.
                 */
                return;
        }

        /*
         * FIXME:
         * - The initial CDCLK needs to be read from VBT.
         *   Need to make this change after VBT has changes for BXT.
         * - check if setting the max (or any) cdclk freq is really necessary
         *   here, it belongs to modeset time
         */
        broxton_set_cdclk(dev, 624000);

        /* request DBUF power and wait for it to come up */
        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
        POSTING_READ(DBUF_CTL);

        udelay(10);

        if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
                DRM_ERROR("DBuf power enable timeout!\n");
}
5615 
/*
 * Reverse of broxton_init_cdclk(): power down the DBUF, drop CDCLK to
 * the 19.2 MHz bypass frequency (turning off the DE PLL), and release
 * the PLL power domain taken at init time.
 */
void broxton_uninit_cdclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* drop the DBUF power request and wait for power-down */
        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
        POSTING_READ(DBUF_CTL);

        udelay(10);

        if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
                DRM_ERROR("DBuf power disable timeout!\n");

        /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
        broxton_set_cdclk(dev, 19200);

        intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5633 
/*
 * Supported SKL/KBL cdclk frequencies (kHz) and the DPLL0 VCO frequency
 * (MHz) required to generate each of them.
 */
static const struct skl_cdclk_entry {
        unsigned int freq;      /* cdclk frequency in kHz */
        unsigned int vco;       /* required DPLL0 VCO in MHz */
} skl_cdclk_frequencies[] = {
        { .freq = 308570, .vco = 8640 },
        { .freq = 337500, .vco = 8100 },
        { .freq = 432000, .vco = 8640 },
        { .freq = 450000, .vco = 8100 },
        { .freq = 540000, .vco = 8100 },
        { .freq = 617140, .vco = 8640 },
        { .freq = 675000, .vco = 8100 },
};
5646 
/*
 * Convert a cdclk frequency in kHz to the CDCLK_CTL "decimal" field
 * encoding: .1 fixed point MHz with a -1 MHz offset.
 */
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
        unsigned int decimal = (freq - 1000) / 500;

        return decimal;
}
5651 
5652 static unsigned int skl_cdclk_get_vco(unsigned int freq)
5653 {
5654         unsigned int i;
5655 
5656         for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5657                 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5658 
5659                 if (e->freq == freq)
5660                         return e->vco;
5661         }
5662 
5663         return 8100;
5664 }
5665 
5666 static void
5667 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5668 {
5669         unsigned int min_freq;
5670         u32 val;
5671 
5672         /* select the minimum CDCLK before enabling DPLL 0 */
5673         val = I915_READ(CDCLK_CTL);
5674         val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5675         val |= CDCLK_FREQ_337_308;
5676 
5677         if (required_vco == 8640)
5678                 min_freq = 308570;
5679         else
5680                 min_freq = 337500;
5681 
5682         val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5683 
5684         I915_WRITE(CDCLK_CTL, val);
5685         POSTING_READ(CDCLK_CTL);
5686 
5687         /*
5688          * We always enable DPLL0 with the lowest link rate possible, but still
5689          * taking into account the VCO required to operate the eDP panel at the
5690          * desired frequency. The usual DP link rates operate with a VCO of
5691          * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5692          * The modeset code is responsible for the selection of the exact link
5693          * rate later on, with the constraint of choosing a frequency that
5694          * works with required_vco.
5695          */
5696         val = I915_READ(DPLL_CTRL1);
5697 
5698         val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5699                  DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5700         val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5701         if (required_vco == 8640)
5702                 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5703                                             SKL_DPLL0);
5704         else
5705                 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5706                                             SKL_DPLL0);
5707 
5708         I915_WRITE(DPLL_CTRL1, val);
5709         POSTING_READ(DPLL_CTRL1);
5710 
5711         I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5712 
5713         if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5714                 DRM_ERROR("DPLL0 not locked\n");
5715 }
5716 
/*
 * Ask the PCU whether it is ready for a cdclk change.
 *
 * NOTE(review): @val is preloaded with SKL_CDCLK_PREPARE_FOR_CHANGE
 * before the pcode read, which suggests it doubles as the mailbox
 * request value as well as the reply buffer — confirm against the
 * sandybridge_pcode_read() contract.
 */
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
        int ret;
        u32 val;

        /* inform PCU we want to change CDCLK */
        val = SKL_CDCLK_PREPARE_FOR_CHANGE;
        mutex_lock(&dev_priv->rps.hw_lock);
        ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
        mutex_unlock(&dev_priv->rps.hw_lock);

        /* ready only if the mailbox read succeeded and the PCU acked */
        return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}
5730 
5731 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5732 {
5733         unsigned int i;
5734 
5735         for (i = 0; i < 15; i++) {
5736                 if (skl_cdclk_pcu_ready(dev_priv))
5737                         return true;
5738                 udelay(10);
5739         }
5740 
5741         return false;
5742 }
5743 
5744 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5745 {
5746         struct drm_device *dev = dev_priv->dev;
5747         u32 freq_select, pcu_ack;
5748 
5749         DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
5750 
5751         if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5752                 DRM_ERROR("failed to inform PCU about cdclk change\n");
5753                 return;
5754         }
5755 
5756         /* set CDCLK_CTL */
5757         switch(freq) {
5758         case 450000:
5759         case 432000:
5760                 freq_select = CDCLK_FREQ_450_432;
5761                 pcu_ack = 1;
5762                 break;
5763         case 540000:
5764                 freq_select = CDCLK_FREQ_540;
5765                 pcu_ack = 2;
5766                 break;
5767         case 308570:
5768         case 337500:
5769         default:
5770                 freq_select = CDCLK_FREQ_337_308;
5771                 pcu_ack = 0;
5772                 break;
5773         case 617140:
5774         case 675000:
5775                 freq_select = CDCLK_FREQ_675_617;
5776                 pcu_ack = 3;
5777                 break;
5778         }
5779 
5780         I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5781         POSTING_READ(CDCLK_CTL);
5782 
5783         /* inform PCU of the change */
5784         mutex_lock(&dev_priv->rps.hw_lock);
5785         sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5786         mutex_unlock(&dev_priv->rps.hw_lock);
5787 
5788         intel_update_cdclk(dev);
5789 }
5790 
/*
 * Reverse of skl_init_cdclk(): power down the DBUF and disable DPLL0.
 */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
        /* disable DBUF power */
        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
        POSTING_READ(DBUF_CTL);

        udelay(10);

        if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
                DRM_ERROR("DBuf power disable timeout\n");

        /* disable DPLL0 */
        I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
        if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
                DRM_ERROR("Couldn't disable DPLL0\n");
}
5807 
/*
 * Bring up the SKL/KBL display clocks: enable DPLL0 if the BIOS left it
 * off, program cdclk to the BIOS-chosen frequency, and power up the
 * DBUF.  Paired with skl_uninit_cdclk().
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
        unsigned int required_vco;

        /* DPLL0 not enabled (happens on early BIOS versions) */
        if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
                /* enable DPLL0 */
                required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
                skl_dpll0_enable(dev_priv, required_vco);
        }

        /* set CDCLK to the frequency the BIOS chose */
        skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

        /* enable DBUF power */
        I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
        POSTING_READ(DBUF_CTL);

        udelay(10);

        if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
                DRM_ERROR("DBuf power enable timeout\n");
}
5831 
/*
 * Check whether the pre-os (BIOS) left CDCLK/DPLL0 in a state we can
 * use, and reinitialize both from scratch if not.
 *
 * Return: true (non-zero) if sanitization was needed, false otherwise.
 */
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
        uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
        uint32_t cdctl = I915_READ(CDCLK_CTL);
        int freq = dev_priv->skl_boot_cdclk;

        /*
         * check if the pre-os intialized the display
         * There is SWF18 scratchpad register defined which is set by the
         * pre-os which can be used by the OS drivers to check the status
         */
        if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
                goto sanitize;

        /* Is PLL enabled and locked ? */
        if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
                goto sanitize;

        /* DPLL okay; verify the cdclock
         *
         * Noticed in some instances that the freq selection is correct but
         * decimal part is programmed wrong from BIOS where pre-os does not
         * enable display. Verify the same as well.
         */
        if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
                /* All well; nothing to sanitize */
                return false;
sanitize:
        /*
         * As of now initialize with max cdclk till
         * we get dynamic cdclk support
         * */
        dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
        skl_init_cdclk(dev_priv);

        /* we did have to sanitize */
        return true;
}
5870 
/*
 * Adjust CDclk dividers to allow high res or save power if possible.
 *
 * Sequence: request the new voltage/frequency point from the Punit,
 * reprogram the CCK cdclk divider (only needed for the 400 MHz case),
 * then raise the bunit self-refresh exit latency for the new clock.
 */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, cmd;

        /* the cached cdclk must still match the hardware state */
        WARN_ON(dev_priv->display.get_display_clock_speed(dev)
                                        != dev_priv->cdclk_freq);

        if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
                cmd = 2;
        else if (cdclk == 266667)
                cmd = 1;
        else
                cmd = 0;

        /* request the new frequency point from the Punit and wait for ack */
        mutex_lock(&dev_priv->rps.hw_lock);
        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        val &= ~DSPFREQGUAR_MASK;
        val |= (cmd << DSPFREQGUAR_SHIFT);
        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
                      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
                     50)) {
                DRM_ERROR("timed out waiting for CDclk change\n");
        }
        mutex_unlock(&dev_priv->rps.hw_lock);

        mutex_lock(&dev_priv->sb_lock);

        if (cdclk == 400000) {
                u32 divider;

                /* divider derived from the HPLL frequency */
                divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

                /* adjust cdclk divider */
                val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
                val &= ~CCK_FREQUENCY_VALUES;
                val |= divider;
                vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

                if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
                              CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
                             50))
                        DRM_ERROR("timed out waiting for CDclk change\n");
        }

        /* adjust self-refresh exit latency value */
        val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
        val &= ~0x7f;

        /*
         * For high bandwidth configs, we set a higher latency in the bunit
         * so that the core display fetch happens in time to avoid underruns.
         */
        if (cdclk == 400000)
                val |= 4500 / 250; /* 4.5 usec */
        else
                val |= 3000 / 250; /* 3.0 usec */
        vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

        mutex_unlock(&dev_priv->sb_lock);

        /* re-read and cache the resulting cdclk */
        intel_update_cdclk(dev);
}
5936 
/*
 * Program a new cdclk on CHV.  Unlike VLV, only the Punit DSPFREQ
 * request is needed; the supported frequency bins are validated up
 * front.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, cmd;

        /* the cached cdclk must still match the hardware state */
        WARN_ON(dev_priv->display.get_display_clock_speed(dev)
                                                != dev_priv->cdclk_freq);

        /* only these four bins are supported */
        switch (cdclk) {
        case 333333:
        case 320000:
        case 266667:
        case 200000:
                break;
        default:
                MISSING_CASE(cdclk);
                return;
        }

        /*
         * Specs are full of misinformation, but testing on actual
         * hardware has shown that we just need to write the desired
         * CCK divider into the Punit register.
         */
        cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

        mutex_lock(&dev_priv->rps.hw_lock);
        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        val &= ~DSPFREQGUAR_MASK_CHV;
        val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
        vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
                      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
                     50)) {
                DRM_ERROR("timed out waiting for CDclk change\n");
        }
        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_update_cdclk(dev);
}
5977 
5978 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5979                                  int max_pixclk)
5980 {
5981         int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5982         int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5983 
5984         /*
5985          * Really only a few cases to deal with, as only 4 CDclks are supported:
5986          *   200MHz
5987          *   267MHz
5988          *   320/333MHz (depends on HPLL freq)
5989          *   400MHz (VLV only)
5990          * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5991          * of the lower bin and adjust if needed.
5992          *
5993          * We seem to get an unstable or solid color picture at 200MHz.
5994          * Not sure what's wrong. For now use 200MHz only when all pipes
5995          * are off.
5996          */
5997         if (!IS_CHERRYVIEW(dev_priv) &&
5998             max_pixclk > freq_320*limit/100)
5999                 return 400000;
6000         else if (max_pixclk > 266667*limit/100)
6001                 return freq_320;
6002         else if (max_pixclk > 0)
6003                 return 266667;
6004         else
6005                 return 200000;
6006 }
6007 
/*
 * Pick the lowest supported BXT cdclk bin whose 90% guardband still
 * covers @max_pixclk.
 */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
                              int max_pixclk)
{
        /*
         * FIXME:
         * - remove the guardband, it's not needed on BXT
         * - set 19.2MHz bypass frequency if there are no active pipes
         */
        static const int cdclk_bins[] = {
                144000, 288000, 384000, 576000, 624000,
        };
        unsigned int i;

        /* return the first bin that covers the pixel clock with 10% margin */
        for (i = 0; i + 1 < sizeof(cdclk_bins) / sizeof(cdclk_bins[0]); i++) {
                if (max_pixclk <= cdclk_bins[i] * 9 / 10)
                        return cdclk_bins[i];
        }

        /* anything higher lands in the topmost bin */
        return cdclk_bins[i];
}
6027 
/*
 * Compute the max pixel clock for the new configuration: start from the
 * cached per-pipe pixel clocks, override the entries for every crtc in
 * @state (storing them in the intel_atomic_state), and return the
 * maximum over all pipes.
 */
static int intel_mode_max_pixclk(struct drm_device *dev,
                                 struct drm_atomic_state *state)
{
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        unsigned max_pixclk = 0, i;
        enum pipe pipe;

        /* seed with the current per-pipe values for untouched crtcs */
        memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
               sizeof(intel_state->min_pixclk));

        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                int pixclk = 0;

                if (crtc_state->enable)
                        pixclk = crtc_state->adjusted_mode.crtc_clock;

                intel_state->min_pixclk[i] = pixclk;
        }

        for_each_pipe(dev_priv, pipe)
                max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);

        /*
         * NOTE(review): max_pixclk is unsigned, so this never returns a
         * negative value even though callers check for one — confirm
         * whether an error return was ever intended here.
         */
        return max_pixclk;
}
6056 
6057 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6058 {
6059         struct drm_device *dev = state->dev;
6060         struct drm_i915_private *dev_priv = dev->dev_private;
6061         int max_pixclk = intel_mode_max_pixclk(dev, state);
6062         struct intel_atomic_state *intel_state =
6063                 to_intel_atomic_state(state);
6064 
6065         if (max_pixclk < 0)
6066                 return max_pixclk;
6067 
6068         intel_state->cdclk = intel_state->dev_cdclk =
6069                 valleyview_calc_cdclk(dev_priv, max_pixclk);
6070 
6071         if (!intel_state->active_crtcs)
6072                 intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
6073 
6074         return 0;
6075 }
6076 
6077 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6078 {
6079         struct drm_device *dev = state->dev;
6080         struct drm_i915_private *dev_priv = dev->dev_private;
6081         int max_pixclk = intel_mode_max_pixclk(dev, state);
6082         struct intel_atomic_state *intel_state =
6083                 to_intel_atomic_state(state);
6084 
6085         if (max_pixclk < 0)
6086                 return max_pixclk;
6087 
6088         intel_state->cdclk = intel_state->dev_cdclk =
6089                 broxton_calc_cdclk(dev_priv, max_pixclk);
6090 
6091         if (!intel_state->active_crtcs)
6092                 intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
6093 
6094         return 0;
6095 }
6096 
/*
 * Reprogram the PFI credits based on whether cdclk is at least czclk.
 * As a workaround, the default credits are written first before the
 * real value is latched with PFI_CREDIT_RESEND.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
        unsigned int credits, default_credits;

        if (IS_CHERRYVIEW(dev_priv))
                default_credits = PFI_CREDIT(12);
        else
                default_credits = PFI_CREDIT(8);

        if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
                /* CHV suggested value is 31 or 63 */
                if (IS_CHERRYVIEW(dev_priv))
                        credits = PFI_CREDIT_63;
                else
                        credits = PFI_CREDIT(15);
        } else {
                credits = default_credits;
        }

        /*
         * WA - write default credits before re-programming
         * FIXME: should we also set the resend bit here?
         */
        I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
                   default_credits);

        I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
                   credits | PFI_CREDIT_RESEND);

        /*
         * FIXME is this guaranteed to clear
         * immediately or should we poll for it?
         */
        WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6132 
/*
 * Commit-time hook: apply the device-level cdclk computed during the
 * atomic check phase (old_intel_state->dev_cdclk) and reprogram the
 * PFI credits to match.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
        struct drm_device *dev = old_state->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        unsigned req_cdclk = old_intel_state->dev_cdclk;

        /*
         * FIXME: We can end up here with all power domains off, yet
         * with a CDCLK frequency other than the minimum. To account
         * for this take the PIPE-A power domain, which covers the HW
         * blocks needed for the following programming. This can be
         * removed once it's guaranteed that we get here either with
         * the minimum CDCLK set, or the required power domains
         * enabled.
         */
        intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

        if (IS_CHERRYVIEW(dev))
                cherryview_set_cdclk(dev, req_cdclk);
        else
                valleyview_set_cdclk(dev, req_cdclk);

        vlv_program_pfi_credits(dev_priv);

        intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6161 
/*
 * Modeset enable sequence for a VLV/CHV pipe.  Order matters: pipe
 * timings and pipeconf first, encoder pre_pll_enable hooks before the
 * PLL is programmed, pre_enable hooks before the pipe is enabled, and
 * the encoder enable hooks run last.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;

        if (WARN_ON(intel_crtc->active))
                return;

        if (intel_crtc->config->has_dp_encoder)
                intel_dp_set_m_n(intel_crtc, M1_N1);

        intel_set_pipe_timings(intel_crtc);

        /* CHV pipe B: select legacy blending and a zeroed constant canvas */
        if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
                struct drm_i915_private *dev_priv = dev->dev_private;

                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                I915_WRITE(CHV_CANVAS(pipe), 0);
        }

        i9xx_set_pipeconf(intel_crtc);

        intel_crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);

        /* the display PLL is not programmed for DSI configurations */
        if (!intel_crtc->config->has_dsi_encoder) {
                if (IS_CHERRYVIEW(dev)) {
                        chv_prepare_pll(intel_crtc, intel_crtc->config);
                        chv_enable_pll(intel_crtc, intel_crtc->config);
                } else {
                        vlv_prepare_pll(intel_crtc, intel_crtc->config);
                        vlv_enable_pll(intel_crtc, intel_crtc->config);
                }
        }

        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);

        i9xx_pfit_enable(intel_crtc);

        intel_crtc_load_lut(crtc);

        intel_enable_pipe(intel_crtc);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
}
6221 
6222 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6223 {
6224         struct drm_device *dev = crtc->base.dev;
6225         struct drm_i915_private *dev_priv = dev->dev_private;
6226 
6227         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6228         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6229 }
6230 
/*
 * Modeset enable sequence for a non-VLV i9xx pipe: PLL dividers and
 * pipe state first, encoder pre_enable hooks before the PLL comes up,
 * then panel fitter, LUT, watermarks, pipe, and finally the encoder
 * enable hooks.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;

        if (WARN_ON(intel_crtc->active))
                return;

        i9xx_set_pll_dividers(intel_crtc);

        if (intel_crtc->config->has_dp_encoder)
                intel_dp_set_m_n(intel_crtc, M1_N1);

        intel_set_pipe_timings(intel_crtc);

        i9xx_set_pipeconf(intel_crtc);

        intel_crtc->active = true;

        /* underrun reporting is skipped on gen2 */
        if (!IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);

        i9xx_enable_pll(intel_crtc);

        i9xx_pfit_enable(intel_crtc);

        intel_crtc_load_lut(crtc);

        intel_update_watermarks(crtc);
        intel_enable_pipe(intel_crtc);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
}
6275 
6276 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6277 {
6278         struct drm_device *dev = crtc->base.dev;
6279         struct drm_i915_private *dev_priv = dev->dev_private;
6280 
6281         if (!crtc->config->gmch_pfit.control)
6282                 return;
6283 
6284         assert_pipe_disabled(dev_priv, crtc->pipe);
6285 
6286         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6287                          I915_READ(PFIT_CONTROL));
6288         I915_WRITE(PFIT_CONTROL, 0);
6289 }
6290 
/*
 * Modeset disable sequence for the pre-ilk GMCH platforms: encoders
 * first, then vblanks, pipe, panel fitter and finally the DPLL.  The
 * statement order mirrors the hardware shutdown sequence and must be
 * preserved.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/* Vblank interrupts must be off before the pipe goes down. */
	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* NOTE(review): the pipe PLL is left alone when a DSI encoder is
	 * attached — presumably DSI clocking is handled by the encoder
	 * hooks; mirrors the enable path. */
	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	/* Underrun reporting was never enabled on gen2 (see enable path). */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6337 
/*
 * Force a crtc off, bypassing the atomic commit machinery.  Performs
 * the same bookkeeping an atomic disable would: planes are torn down
 * first, then the crtc hardware, and finally the power-domain and
 * clock tracking in dev_priv is updated.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	/* Already off — nothing to do. */
	if (!intel_crtc->active)
		return;

	if (to_intel_plane_state(crtc->primary->state)->visible) {
		/* A pending page flip on a crtc being disabled is a bug. */
		WARN_ON(intel_crtc->unpin_work);

		intel_pre_disable_primary(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Release every power-domain reference this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	/* Keep the active-crtc mask and min pixel clock tracking in sync. */
	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}
6371 
6372 /*
6373  * turn all crtc's off, but do not adjust state
6374  * This has to be paired with a call to intel_modeset_setup_hw_state.
6375  */
6376 int intel_display_suspend(struct drm_device *dev)
6377 {
6378         struct drm_i915_private *dev_priv = to_i915(dev);
6379         struct drm_atomic_state *state;
6380         int ret;
6381 
6382         state = drm_atomic_helper_suspend(dev);
6383         ret = PTR_ERR_OR_ZERO(state);
6384         if (ret)
6385                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6386         else
6387                 dev_priv->modeset_restore_state = state;
6388         return ret;
6389 }
6390 
/*
 * Common destroy hook for intel encoders: tear down the drm core state
 * and free the containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
6398 
6399 /* Cross check the actual hw state with our own modeset state tracking (and it's
6400  * internal consistency). */
6401 static void intel_connector_check_state(struct intel_connector *connector)
6402 {
6403         struct drm_crtc *crtc = connector->base.state->crtc;
6404 
6405         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6406                       connector->base.base.id,
6407                       connector->base.name);
6408 
6409         if (connector->get_hw_state(connector)) {
6410                 struct intel_encoder *encoder = connector->encoder;
6411                 struct drm_connector_state *conn_state = connector->base.state;
6412 
6413                 I915_STATE_WARN(!crtc,
6414                          "connector enabled without attached crtc\n");
6415 
6416                 if (!crtc)
6417                         return;
6418 
6419                 I915_STATE_WARN(!crtc->state->active,
6420                       "connector is active, but attached crtc isn't\n");
6421 
6422                 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6423                         return;
6424 
6425                 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6426                         "atomic encoder doesn't match attached encoder\n");
6427 
6428                 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6429                         "attached encoder crtc differs from connector crtc\n");
6430         } else {
6431                 I915_STATE_WARN(crtc && crtc->state->active,
6432                         "attached crtc is active, but connector isn't\n");
6433                 I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6434                         "best encoder set without crtc!\n");
6435         }
6436 }
6437 
6438 int intel_connector_init(struct intel_connector *connector)
6439 {
6440         drm_atomic_helper_connector_reset(&connector->base);
6441 
6442         if (!connector->base.state)
6443                 return -ENOMEM;
6444 
6445         return 0;
6446 }
6447 
6448 struct intel_connector *intel_connector_alloc(void)
6449 {
6450         struct intel_connector *connector;
6451 
6452         connector = kzalloc(sizeof *connector, GFP_KERNEL);
6453         if (!connector)
6454                 return NULL;
6455 
6456         if (intel_connector_init(connector) < 0) {
6457                 kfree(connector);