Version:  2.0.40 2.2.26 2.4.37 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4

Linux/drivers/gpu/drm/i915/intel_display.c

  1 /*
  2  * Copyright © 2006-2007 Intel Corporation
  3  *
  4  * Permission is hereby granted, free of charge, to any person obtaining a
  5  * copy of this software and associated documentation files (the "Software"),
  6  * to deal in the Software without restriction, including without limitation
  7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8  * and/or sell copies of the Software, and to permit persons to whom the
  9  * Software is furnished to do so, subject to the following conditions:
 10  *
 11  * The above copyright notice and this permission notice (including the next
 12  * paragraph) shall be included in all copies or substantial portions of the
 13  * Software.
 14  *
 15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 21  * DEALINGS IN THE SOFTWARE.
 22  *
 23  * Authors:
 24  *      Eric Anholt <eric@anholt.net>
 25  */
 26 
 27 #include <linux/dmi.h>
 28 #include <linux/module.h>
 29 #include <linux/input.h>
 30 #include <linux/i2c.h>
 31 #include <linux/kernel.h>
 32 #include <linux/slab.h>
 33 #include <linux/vgaarb.h>
 34 #include <drm/drm_edid.h>
 35 #include <drm/drmP.h>
 36 #include "intel_drv.h"
 37 #include <drm/i915_drm.h>
 38 #include "i915_drv.h"
 39 #include "i915_trace.h"
 40 #include <drm/drm_atomic.h>
 41 #include <drm/drm_atomic_helper.h>
 42 #include <drm/drm_dp_helper.h>
 43 #include <drm/drm_crtc_helper.h>
 44 #include <drm/drm_plane_helper.h>
 45 #include <drm/drm_rect.h>
 46 #include <linux/dma_remapping.h>
 47 
/* Primary plane formats for gen <= 3 */
/* Pixel formats the primary plane accepts on gen2/gen3 hardware. */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};
 55 
/* Primary plane formats for gen >= 4 */
/* Gen4+ adds BGR ordering and 10bpc (2101010) variants over gen2/3. */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};
 65 
/*
 * Primary plane formats for Skylake+: adds per-pixel alpha (ARGB/ABGR)
 * and packed YUV 4:2:2 formats on top of the gen4 set.
 */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};
 80 
/* Cursor formats */
/* The cursor plane only accepts pre-multiplied-style ARGB8888. */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
 85 
/* Forward declarations for static helpers defined later in this file. */
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary(struct drm_crtc *crtc);
120 
/* Inclusive [min, max] bound for one PLL divider/clock parameter. */
typedef struct {
	int	min, max;
} intel_range_t;

/*
 * Post-divider (p2) selection rule: p2_slow is used below dot_limit (kHz),
 * p2_fast at or above it (see i9xx_select_p2_div()).
 */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
/* Full set of per-platform PLL parameter limits used by the DPLL search. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};
135 
136 /* returns HPLL frequency in kHz */
137 static int valleyview_get_vco(struct drm_i915_private *dev_priv)
138 {
139         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
140 
141         /* Obtain SKU information */
142         mutex_lock(&dev_priv->sb_lock);
143         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
144                 CCK_FUSE_HPLL_FREQ_MASK;
145         mutex_unlock(&dev_priv->sb_lock);
146 
147         return vco_freq[hpll_freq] * 1000;
148 }
149 
/*
 * Read the CCK clock-control register @reg and derive the resulting clock
 * rate in kHz from the cached HPLL frequency: 2 * hpll / (divider + 1).
 * The HPLL frequency is read (and cached) lazily on first use.  WARNs,
 * tagged with @name, if the hardware reports a divider change in progress.
 */
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	u32 val;
	int divider;

	/* Lazily cache the HPLL reference frequency (kHz). */
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	/* The status field should mirror the programmed divider when stable. */
	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
171 
/*
 * Return the PCH raw clock frequency field from PCH_RAWCLK_FREQ.
 * Only meaningful on PCH-split platforms; WARNs otherwise.
 */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
181 
/* hrawclock is 1/4 the FSB frequency */
/*
 * Return hrawclk in MHz, decoded from the CLKCFG FSB field.  Unknown
 * encodings fall back to 133 MHz.
 */
int intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
214 
/*
 * Cache the current CZ clock rate (kHz) in dev_priv->czclk_freq.
 * No-op on non-VLV/CHV platforms.
 */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!IS_VALLEYVIEW(dev_priv))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}
225 
226 static inline u32 /* units of 100MHz */
227 intel_fdi_link_freq(struct drm_device *dev)
228 {
229         if (IS_GEN5(dev)) {
230                 struct drm_i915_private *dev_priv = dev->dev_private;
231                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
232         } else
233                 return 27;
234 }
235 
/* Gen2 (i8xx) PLL limits with a CRT/DAC output attached. */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* Gen2 PLL limits with a DVO output; differs from DAC only in p2_fast. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* Gen2 PLL limits with an LVDS panel attached. */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
274 
/* Gen3/4-class (i9xx) PLL limits for SDVO and other non-LVDS outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Gen3/4-class (i9xx) PLL limits with an LVDS panel attached. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
300 
301 
/* G4X PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* G4X PLL limits for HDMI (also used for analog, see intel_g4x_limit()). */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* G4X PLL limits for single-channel LVDS (dot_limit 0 => p2_fast always). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* G4X PLL limits for dual-channel LVDS. */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
357 
/* Pineview PLL limits for SDVO outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview PLL limits with an LVDS panel attached (m1 unused, as above). */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
385 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
/* ILK/SNB PLL limits for CRT/DAC (and other non-LVDS) outputs. */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ILK/SNB PLL limits for single-channel LVDS with the default refclk. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ILK/SNB PLL limits for dual-channel LVDS with the default refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Dual-channel variant of the 100 MHz refclk LVDS limits above. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
456 
/* Valleyview PLL limits. */
static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

/* Cherryview PLL limits; m2 values carry 22 fractional bits (see chv_calc_dpll_params()). */
static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

/* Broxton PLL limits; same fixed-point m2 encoding as CHV. */
static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
500 
/* Thin wrapper: true if the atomic CRTC state requires a full modeset. */
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
506 
/**
 * Returns whether any output on the specified pipe is of the specified type
 *
 * Walks the encoders currently attached to @crtc and reports whether one
 * of them has the requested intel_output_type.
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}
521 
/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	/* Scan connectors in the atomic state that will target this CRTC. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	/* Callers only ask about CRTCs that have at least one connector. */
	WARN_ON(num_connectors == 0);

	return false;
}
552 
553 static const intel_limit_t *
554 intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
555 {
556         struct drm_device *dev = crtc_state->base.crtc->dev;
557         const intel_limit_t *limit;
558 
559         if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
560                 if (intel_is_dual_link_lvds(dev)) {
561                         if (refclk == 100000)
562                                 limit = &intel_limits_ironlake_dual_lvds_100m;
563                         else
564                                 limit = &intel_limits_ironlake_dual_lvds;
565                 } else {
566                         if (refclk == 100000)
567                                 limit = &intel_limits_ironlake_single_lvds_100m;
568                         else
569                                 limit = &intel_limits_ironlake_single_lvds;
570                 }
571         } else
572                 limit = &intel_limits_ironlake_dac;
573 
574         return limit;
575 }
576 
577 static const intel_limit_t *
578 intel_g4x_limit(struct intel_crtc_state *crtc_state)
579 {
580         struct drm_device *dev = crtc_state->base.crtc->dev;
581         const intel_limit_t *limit;
582 
583         if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
584                 if (intel_is_dual_link_lvds(dev))
585                         limit = &intel_limits_g4x_dual_channel_lvds;
586                 else
587                         limit = &intel_limits_g4x_single_channel_lvds;
588         } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
589                    intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
590                 limit = &intel_limits_g4x_hdmi;
591         } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
592                 limit = &intel_limits_g4x_sdvo;
593         } else /* The option is for other outputs */
594                 limit = &intel_limits_i9xx_sdvo;
595 
596         return limit;
597 }
598 
599 static const intel_limit_t *
600 intel_limit(struct intel_crtc_state *crtc_state, int refclk)
601 {
602         struct drm_device *dev = crtc_state->base.crtc->dev;
603         const intel_limit_t *limit;
604 
605         if (IS_BROXTON(dev))
606                 limit = &intel_limits_bxt;
607         else if (HAS_PCH_SPLIT(dev))
608                 limit = intel_ironlake_limit(crtc_state, refclk);
609         else if (IS_G4X(dev)) {
610                 limit = intel_g4x_limit(crtc_state);
611         } else if (IS_PINEVIEW(dev)) {
612                 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
613                         limit = &intel_limits_pineview_lvds;
614                 else
615                         limit = &intel_limits_pineview_sdvo;
616         } else if (IS_CHERRYVIEW(dev)) {
617                 limit = &intel_limits_chv;
618         } else if (IS_VALLEYVIEW(dev)) {
619                 limit = &intel_limits_vlv;
620         } else if (!IS_GEN2(dev)) {
621                 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
622                         limit = &intel_limits_i9xx_lvds;
623                 else
624                         limit = &intel_limits_i9xx_sdvo;
625         } else {
626                 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
627                         limit = &intel_limits_i8xx_lvds;
628                 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
629                         limit = &intel_limits_i8xx_dvo;
630                 else
631                         limit = &intel_limits_i8xx_dac;
632         }
633         return limit;
634 }
635 
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	/* The single m divider is register-encoded as (actual - 2). */
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	/* Guard against division by zero from bogus divisors. */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
656 
/*
 * Effective i9xx m divider: m1 and m2 are register-encoded as (actual - 2),
 * with m1 weighted by 5.
 */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
661 
/*
 * Fill in the derived m/p/vco/dot fields of @clock for i9xx-style PLLs
 * and return the dot clock (kHz), or 0 if the divisors are invalid.
 */
static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* n is register-encoded as (actual - 2); guard the division below. */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
673 
/*
 * VLV variant: clock->dot is the fast (data rate) clock; the pipe clock
 * returned to the caller is the fast clock divided by 5.
 */
static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
685 
/*
 * CHV variant of vlv_calc_dpll_params(): m2 carries 22 fractional bits,
 * hence the 64-bit multiply and the (n << 22) divisor.  Returns the pipe
 * clock, i.e. the fast dot clock divided by 5.
 */
int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
698 
/* NOTE: this macro expands to a "return false" in the *caller's* body. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* PNV/VLV/BXT don't require m1 > m2 (PNV has no m1 at all). */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/BXT limit tables don't constrain the combined m and p values. */
	if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
739 
740 static int
741 i9xx_select_p2_div(const intel_limit_t *limit,
742                    const struct intel_crtc_state *crtc_state,
743                    int target)
744 {
745         struct drm_device *dev = crtc_state->base.crtc->dev;
746 
747         if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
748                 /*
749                  * For LVDS just rely on its current settings for dual-channel.
750                  * We haven't figured out how to reliably set up different
751                  * single/dual channel state, if we even can.
752                  */
753                 if (intel_is_dual_link_lvds(dev))
754                         return limit->p2.p2_fast;
755                 else
756                         return limit->p2.p2_slow;
757         } else {
758                 if (target < limit->p2.dot_limit)
759                         return limit->p2.p2_slow;
760                 else
761                         return limit->p2.p2_fast;
762         }
763 }
764 
/*
 * Brute-force search over the m1/m2/n/p1 divider ranges for the PLL
 * configuration whose computed dot clock is closest to @target (kHz).
 * If @match_clock is non-NULL, only candidates with the same p divider
 * are considered.  The best candidate is written to @best_clock.
 *
 * Returns true if any valid configuration improved on the initial error
 * of @target, false if nothing usable was found.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* i9xx requires m1 > m2 (see intel_PLL_is_valid()). */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* Keep the candidate closest to target. */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
811 
/*
 * Pineview variant of the DPLL divisor search.  Same structure as
 * i9xx_find_best_dpll() but uses pnv_calc_dpll_params() and has no
 * "m2 < m1" restriction in the m2 sweep.
 *
 * @match_clock: if non-NULL, only candidates with the same total post
 *	divider p are considered.
 * @best_clock: zeroed, then filled with the winning configuration.
 *
 * Returns true iff a valid candidate was found.
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	/* best error so far; seeded with target so any valid hit wins */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
856 
/*
 * G4x DPLL divisor search.  Unlike the i9xx/pnv variants this accepts any
 * candidate within a fixed error budget (err_most) rather than hunting for
 * the absolute closest dot clock, and it biases the sweep order toward
 * small n and large m1/m2/p1.  @match_clock is unused on this path.
 *
 * Returns true and fills @best_clock iff a candidate within the error
 * budget was found.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Shrink the n sweep: never
						 * consider a larger n than
						 * the current best again.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
904 
905 /*
906  * Check if the calculated PLL configuration is more optimal compared to the
907  * best configuration and error found so far. Return the calculated error.
908  */
909 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
910                                const intel_clock_t *calculated_clock,
911                                const intel_clock_t *best_clock,
912                                unsigned int best_error_ppm,
913                                unsigned int *error_ppm)
914 {
915         /*
916          * For CHV ignore the error and consider only the P value.
917          * Prefer a bigger P value based on HW requirements.
918          */
919         if (IS_CHERRYVIEW(dev)) {
920                 *error_ppm = 0;
921 
922                 return calculated_clock->p > best_clock->p;
923         }
924 
925         if (WARN_ON_ONCE(!target_freq))
926                 return false;
927 
928         *error_ppm = div_u64(1000000ULL *
929                                 abs(target_freq - calculated_clock->dot),
930                              target_freq);
931         /*
932          * Prefer a better P value over a better (smaller) error if the error
933          * is small. Ensure this preference for future configurations too by
934          * setting the error to 0.
935          */
936         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
937                 *error_ppm = 0;
938 
939                 return true;
940         }
941 
942         return *error_ppm + 10 < best_error_ppm;
943 }
944 
/*
 * VLV DPLL divisor search.  Works on the "fast clock" (5x the target),
 * sweeps n/p1/p2/m1 and solves m2 directly for each combination instead of
 * sweeping it.  Candidate selection is delegated to vlv_PLL_is_optimal().
 * @match_clock is unused on this path.
 *
 * Returns true and fills @best_clock iff a valid configuration was found.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 for the current n/p/m1 choice */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
998 
999 static bool
1000 chv_find_best_dpll(const intel_limit_t *limit,
1001                    struct intel_crtc_state *crtc_state,
1002                    int target, int refclk, intel_clock_t *match_clock,
1003                    intel_clock_t *best_clock)
1004 {
1005         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1006         struct drm_device *dev = crtc->base.dev;
1007         unsigned int best_error_ppm;
1008         intel_clock_t clock;
1009         uint64_t m2;
1010         int found = false;
1011 
1012         memset(best_clock, 0, sizeof(*best_clock));
1013         best_error_ppm = 1000000;
1014 
1015         /*
1016          * Based on hardware doc, the n always set to 1, and m1 always
1017          * set to 2.  If requires to support 200Mhz refclk, we need to
1018          * revisit this because n may not 1 anymore.
1019          */
1020         clock.n = 1, clock.m1 = 2;
1021         target *= 5;    /* fast clock */
1022 
1023         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1024                 for (clock.p2 = limit->p2.p2_fast;
1025                                 clock.p2 >= limit->p2.p2_slow;
1026                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1027                         unsigned int error_ppm;
1028 
1029                         clock.p = clock.p1 * clock.p2;
1030 
1031                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1032                                         clock.n) << 22, refclk * clock.m1);
1033 
1034                         if (m2 > INT_MAX/clock.m1)
1035                                 continue;
1036 
1037                         clock.m2 = m2;
1038 
1039                         chv_calc_dpll_params(refclk, &clock);
1040 
1041                         if (!intel_PLL_is_valid(dev, limit, &clock))
1042                                 continue;
1043 
1044                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1045                                                 best_error_ppm, &error_ppm))
1046                                 continue;
1047 
1048                         *best_clock = clock;
1049                         best_error_ppm = error_ppm;
1050                         found = true;
1051                 }
1052         }
1053 
1054         return found;
1055 }
1056 
1057 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1058                         intel_clock_t *best_clock)
1059 {
1060         int refclk = i9xx_get_refclk(crtc_state, 0);
1061 
1062         return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1063                                   target_clock, refclk, NULL, best_clock);
1064 }
1065 
1066 bool intel_crtc_active(struct drm_crtc *crtc)
1067 {
1068         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1069 
1070         /* Be paranoid as we can arrive here with only partial
1071          * state retrieved from the hardware during setup.
1072          *
1073          * We can ditch the adjusted_mode.crtc_clock check as soon
1074          * as Haswell has gained clock readout/fastboot support.
1075          *
1076          * We can ditch the crtc->primary->fb check as soon as we can
1077          * properly reconstruct framebuffers.
1078          *
1079          * FIXME: The intel_crtc->active here should be switched to
1080          * crtc->state->active once we have proper CRTC states wired up
1081          * for atomic.
1082          */
1083         return intel_crtc->active && crtc->primary->state->fb &&
1084                 intel_crtc->config->base.adjusted_mode.crtc_clock;
1085 }
1086 
1087 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1088                                              enum pipe pipe)
1089 {
1090         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1091         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1092 
1093         return intel_crtc->config->cpu_transcoder;
1094 }
1095 
1096 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1097 {
1098         struct drm_i915_private *dev_priv = dev->dev_private;
1099         u32 reg = PIPEDSL(pipe);
1100         u32 line1, line2;
1101         u32 line_mask;
1102 
1103         if (IS_GEN2(dev))
1104                 line_mask = DSL_LINEMASK_GEN2;
1105         else
1106                 line_mask = DSL_LINEMASK_GEN3;
1107 
1108         line1 = I915_READ(reg) & line_mask;
1109         msleep(5);
1110         line2 = I915_READ(reg) & line_mask;
1111 
1112         return line1 == line2;
1113 }
1114 
1115 /*
1116  * intel_wait_for_pipe_off - wait for pipe to turn off
1117  * @crtc: crtc whose pipe to wait for
1118  *
1119  * After disabling a pipe, we can't wait for vblank in the usual way,
1120  * spinning on the vblank interrupt status bit, since we won't actually
1121  * see an interrupt when the pipe is disabled.
1122  *
1123  * On Gen4 and above:
1124  *   wait for the pipe register state bit to turn off
1125  *
1126  * Otherwise:
1127  *   wait for the display line value to settle (it usually
1128  *   ends up stopping at the start of the next frame).
1129  *
1130  */
1131 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1132 {
1133         struct drm_device *dev = crtc->base.dev;
1134         struct drm_i915_private *dev_priv = dev->dev_private;
1135         enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1136         enum pipe pipe = crtc->pipe;
1137 
1138         if (INTEL_INFO(dev)->gen >= 4) {
1139                 int reg = PIPECONF(cpu_transcoder);
1140 
1141                 /* Wait for the Pipe State to go off */
1142                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1143                              100))
1144                         WARN(1, "pipe_off wait timed out\n");
1145         } else {
1146                 /* Wait for the display line to settle */
1147                 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1148                         WARN(1, "pipe_off wait timed out\n");
1149         }
1150 }
1151 
/* Human-readable on/off string for the assertion messages below. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
1156 
1157 /* Only for pre-ILK configs */
1158 void assert_pll(struct drm_i915_private *dev_priv,
1159                 enum pipe pipe, bool state)
1160 {
1161         u32 val;
1162         bool cur_state;
1163 
1164         val = I915_READ(DPLL(pipe));
1165         cur_state = !!(val & DPLL_VCO_ENABLE);
1166         I915_STATE_WARN(cur_state != state,
1167              "PLL state assertion failure (expected %s, current %s)\n",
1168              state_string(state), state_string(cur_state));
1169 }
1170 
1171 /* XXX: the dsi pll is shared between MIPI DSI ports */
1172 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1173 {
1174         u32 val;
1175         bool cur_state;
1176 
1177         mutex_lock(&dev_priv->sb_lock);
1178         val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1179         mutex_unlock(&dev_priv->sb_lock);
1180 
1181         cur_state = val & DSI_PLL_VCO_EN;
1182         I915_STATE_WARN(cur_state != state,
1183              "DSI PLL state assertion failure (expected %s, current %s)\n",
1184              state_string(state), state_string(cur_state));
1185 }
1186 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1187 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1188 
1189 struct intel_shared_dpll *
1190 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1191 {
1192         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1193 
1194         if (crtc->config->shared_dpll < 0)
1195                 return NULL;
1196 
1197         return &dev_priv->shared_dplls[crtc->config->shared_dpll];
1198 }
1199 
1200 /* For ILK+ */
1201 void assert_shared_dpll(struct drm_i915_private *dev_priv,
1202                         struct intel_shared_dpll *pll,
1203                         bool state)
1204 {
1205         bool cur_state;
1206         struct intel_dpll_hw_state hw_state;
1207 
1208         if (WARN (!pll,
1209                   "asserting DPLL %s with no DPLL\n", state_string(state)))
1210                 return;
1211 
1212         cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
1213         I915_STATE_WARN(cur_state != state,
1214              "%s assertion failure (expected %s, current %s)\n",
1215              pll->name, state_string(state), state_string(cur_state));
1216 }
1217 
1218 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1219                           enum pipe pipe, bool state)
1220 {
1221         bool cur_state;
1222         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1223                                                                       pipe);
1224 
1225         if (HAS_DDI(dev_priv->dev)) {
1226                 /* DDI does not have a specific FDI_TX register */
1227                 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1228                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1229         } else {
1230                 u32 val = I915_READ(FDI_TX_CTL(pipe));
1231                 cur_state = !!(val & FDI_TX_ENABLE);
1232         }
1233         I915_STATE_WARN(cur_state != state,
1234              "FDI TX state assertion failure (expected %s, current %s)\n",
1235              state_string(state), state_string(cur_state));
1236 }
1237 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1238 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1239 
1240 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1241                           enum pipe pipe, bool state)
1242 {
1243         u32 val;
1244         bool cur_state;
1245 
1246         val = I915_READ(FDI_RX_CTL(pipe));
1247         cur_state = !!(val & FDI_RX_ENABLE);
1248         I915_STATE_WARN(cur_state != state,
1249              "FDI RX state assertion failure (expected %s, current %s)\n",
1250              state_string(state), state_string(cur_state));
1251 }
1252 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1253 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1254 
1255 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1256                                       enum pipe pipe)
1257 {
1258         u32 val;
1259 
1260         /* ILK FDI PLL is always enabled */
1261         if (INTEL_INFO(dev_priv->dev)->gen == 5)
1262                 return;
1263 
1264         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1265         if (HAS_DDI(dev_priv->dev))
1266                 return;
1267 
1268         val = I915_READ(FDI_TX_CTL(pipe));
1269         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1270 }
1271 
1272 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1273                        enum pipe pipe, bool state)
1274 {
1275         u32 val;
1276         bool cur_state;
1277 
1278         val = I915_READ(FDI_RX_CTL(pipe));
1279         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1280         I915_STATE_WARN(cur_state != state,
1281              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1282              state_string(state), state_string(cur_state));
1283 }
1284 
/*
 * Assert that the panel power sequencer registers driving @pipe are not
 * write-locked (a warning fires only when the panel drives @pipe AND the
 * registers are locked).  The PP control register and panel pipe are
 * looked up per platform; DDI platforms are rejected outright.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* this function does not handle DDI platforms */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	/* panel powered off, or unlock pattern present -> not locked */
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1326 
1327 static void assert_cursor(struct drm_i915_private *dev_priv,
1328                           enum pipe pipe, bool state)
1329 {
1330         struct drm_device *dev = dev_priv->dev;
1331         bool cur_state;
1332 
1333         if (IS_845G(dev) || IS_I865G(dev))
1334                 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1335         else
1336                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1337 
1338         I915_STATE_WARN(cur_state != state,
1339              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1340              pipe_name(pipe), state_string(state), state_string(cur_state));
1341 }
1342 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1343 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1344 
1345 void assert_pipe(struct drm_i915_private *dev_priv,
1346                  enum pipe pipe, bool state)
1347 {
1348         bool cur_state;
1349         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1350                                                                       pipe);
1351 
1352         /* if we need the pipe quirk it must be always on */
1353         if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1354             (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1355                 state = true;
1356 
1357         if (!intel_display_power_is_enabled(dev_priv,
1358                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1359                 cur_state = false;
1360         } else {
1361                 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1362                 cur_state = !!(val & PIPECONF_ENABLE);
1363         }
1364 
1365         I915_STATE_WARN(cur_state != state,
1366              "pipe %c assertion failure (expected %s, current %s)\n",
1367              pipe_name(pipe), state_string(state), state_string(cur_state));
1368 }
1369 
1370 static void assert_plane(struct drm_i915_private *dev_priv,
1371                          enum plane plane, bool state)
1372 {
1373         u32 val;
1374         bool cur_state;
1375 
1376         val = I915_READ(DSPCNTR(plane));
1377         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1378         I915_STATE_WARN(cur_state != state,
1379              "plane %c assertion failure (expected %s, current %s)\n",
1380              plane_name(plane), state_string(state), state_string(cur_state));
1381 }
1382 
1383 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1384 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1385 
1386 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1387                                    enum pipe pipe)
1388 {
1389         struct drm_device *dev = dev_priv->dev;
1390         int i;
1391 
1392         /* Primary planes are fixed to pipes on gen4+ */
1393         if (INTEL_INFO(dev)->gen >= 4) {
1394                 u32 val = I915_READ(DSPCNTR(pipe));
1395                 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1396                      "plane %c assertion failure, should be disabled but not\n",
1397                      plane_name(pipe));
1398                 return;
1399         }
1400 
1401         /* Need to check both planes against the pipe */
1402         for_each_pipe(dev_priv, i) {
1403                 u32 val = I915_READ(DSPCNTR(i));
1404                 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1405                         DISPPLANE_SEL_PIPE_SHIFT;
1406                 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1407                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1408                      plane_name(i), pipe_name(pipe));
1409         }
1410 }
1411 
/*
 * Assert that no sprite/overlay plane is enabled on @pipe.  The sprite
 * register layout differs per generation, hence the four branches below.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9+: universal planes, PLANE_CTL per pipe/sprite */
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev)) {
		/* VLV/CHV: multiple sprites per pipe via SPCNTR */
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		/* IVB/HSW: one sprite per pipe via SPRCTL */
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		/* ILK/SNB: one sprite per pipe via DVSCNTR */
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1444 
/*
 * Assert that vblank interrupts on @crtc are off.  If drm_crtc_vblank_get()
 * succeeds, vblanks were unexpectedly enabled; drop the reference we took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	int ret = drm_crtc_vblank_get(crtc);

	if (I915_STATE_WARN_ON(ret == 0))
		drm_crtc_vblank_put(crtc);
}
1450 
1451 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1452 {
1453         u32 val;
1454         bool enabled;
1455 
1456         I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
1457 
1458         val = I915_READ(PCH_DREF_CONTROL);
1459         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1460                             DREF_SUPERSPREAD_SOURCE_MASK));
1461         I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1462 }
1463 
1464 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1465                                            enum pipe pipe)
1466 {
1467         u32 val;
1468         bool enabled;
1469 
1470         val = I915_READ(PCH_TRANSCONF(pipe));
1471         enabled = !!(val & TRANS_ENABLE);
1472         I915_STATE_WARN(enabled,
1473              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1474              pipe_name(pipe));
1475 }
1476 
1477 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1478                             enum pipe pipe, u32 port_sel, u32 val)
1479 {
1480         if ((val & DP_PORT_EN) == 0)
1481                 return false;
1482 
1483         if (HAS_PCH_CPT(dev_priv->dev)) {
1484                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1485                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1486                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1487                         return false;
1488         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1489                 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1490                         return false;
1491         } else {
1492                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1493                         return false;
1494         }
1495         return true;
1496 }
1497 
1498 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1499                               enum pipe pipe, u32 val)
1500 {
1501         if ((val & SDVO_ENABLE) == 0)
1502                 return false;
1503 
1504         if (HAS_PCH_CPT(dev_priv->dev)) {
1505                 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1506                         return false;
1507         } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1508                 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1509                         return false;
1510         } else {
1511                 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1512                         return false;
1513         }
1514         return true;
1515 }
1516 
1517 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1518                               enum pipe pipe, u32 val)
1519 {
1520         if ((val & LVDS_PORT_EN) == 0)
1521                 return false;
1522 
1523         if (HAS_PCH_CPT(dev_priv->dev)) {
1524                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1525                         return false;
1526         } else {
1527                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1528                         return false;
1529         }
1530         return true;
1531 }
1532 
1533 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1534                               enum pipe pipe, u32 val)
1535 {
1536         if ((val & ADPA_DAC_ENABLE) == 0)
1537                 return false;
1538         if (HAS_PCH_CPT(dev_priv->dev)) {
1539                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1540                         return false;
1541         } else {
1542                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1543                         return false;
1544         }
1545         return true;
1546 }
1547 
/*
 * Assert that the PCH DP port at @reg is not enabled on transcoder @pipe.
 * Additionally warn on IBX if a disabled port is still left selecting
 * transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	/* IBX-specific check: port disabled but pipe-B select bit still set */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1560 
/*
 * Assert that the PCH HDMI port at @reg is not enabled on transcoder
 * @pipe. Additionally warn on IBX if a disabled port is still left
 * selecting transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	/* IBX-specific check: port disabled but pipe-B select bit still set */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}
1573 
/*
 * Assert that no PCH port (DP B/C/D, VGA, LVDS, HDMI B/C/D) is enabled
 * on transcoder @pipe. Used before disabling a PCH transcoder.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
1597 
/*
 * Enable the DPLL for @crtc's pipe on Valleyview, using the precomputed
 * hardware state in @pipe_config. The pipe must be disabled first.
 * The write/wait sequence (including the triple rewrite) follows the
 * hardware's required enable procedure; do not reorder.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	/* Wait (max 1 ms) for the PLL lock bit to assert */
	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1636 
/*
 * Enable the DPLL for @crtc's pipe on Cherryview. Unlike VLV this first
 * enables the 10-bit display clock via the DPIO sideband (under sb_lock)
 * before setting the PLL enable bit, then waits for lock.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);

	/* not sure when this should be written */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1675 
1676 static int intel_num_dvo_pipes(struct drm_device *dev)
1677 {
1678         struct intel_crtc *crtc;
1679         int count = 0;
1680 
1681         for_each_intel_crtc(dev, crtc)
1682                 count += crtc->base.state->active &&
1683                         intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1684 
1685         return count;
1686 }
1687 
/*
 * Enable the DPLL for @crtc's pipe on pre-ILK (gen2-4) hardware, using
 * crtc->config->dpll_hw_state. The pipe must be disabled first. The
 * write sequence (VGA-mode clear, delays, triple rewrite) follows the
 * hardware's required procedure; do not reorder.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1753 
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: CRTC whose pipe's PLL should be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Leave only VGA-mode-disable set; everything else (incl. enable) off */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1790 
/*
 * Disable the DPLL for @pipe on Valleyview. The pipe must already be
 * disabled.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	val = DPLL_VGA_MODE_DIS;
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

}
1809 
/*
 * Disable the DPLL for @pipe on Cherryview: clear the PLL enable bit
 * first, then turn off the 10-bit display clock via the DPIO sideband
 * (under sb_lock). The pipe must already be disabled.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1835 
/*
 * Wait (up to 1 s) for @dport's lane-ready bits to match @expected_mask.
 * Which register and mask to poll depends on the port: B and C live in
 * DPLL(0) (C's field shifted up by 4), D in DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	int dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's ready field sits 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1865 
/*
 * Run the shared DPLL's mode_set hook for @crtc's PLL, but only while
 * the PLL has no active users (it must still be off at this point).
 */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	WARN_ON(!pll->config.crtc_mask);
	/* Only program the PLL while nothing is using it yet */
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}
1884 
/**
 * intel_enable_shared_dpll - enable a shared DPLL (e.g. PCH PLL)
 * @crtc: CRTC whose shared DPLL should be enabled
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock. Reference counted via pll->active: only
 * the first user actually turns the hardware on.
 */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->config.crtc_mask == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Already in use by another CRTC: just sanity-check it's really on */
	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}
1922 
/*
 * Drop @crtc's reference on its shared DPLL; the hardware is only
 * turned off (and the PLLS power domain released) when the last user
 * goes away.
 */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	if (pll == NULL)
		return;

	/* This CRTC must actually be listed as a user of the PLL */
	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* Refcount underflow: bail, but double-check the hardware is off */
	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
1959 
1960 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1961                                            enum pipe pipe)
1962 {
1963         struct drm_device *dev = dev_priv->dev;
1964         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1965         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1966         uint32_t reg, val, pipeconf_val;
1967 
1968         /* PCH only available on ILK+ */
1969         BUG_ON(!HAS_PCH_SPLIT(dev));
1970 
1971         /* Make sure PCH DPLL is enabled */
1972         assert_shared_dpll_enabled(dev_priv,
1973                                    intel_crtc_to_shared_dpll(intel_crtc));
1974 
1975         /* FDI must be feeding us bits for PCH ports */
1976         assert_fdi_tx_enabled(dev_priv, pipe);
1977         assert_fdi_rx_enabled(dev_priv, pipe);
1978 
1979         if (HAS_PCH_CPT(dev)) {
1980                 /* Workaround: Set the timing override bit before enabling the
1981                  * pch transcoder. */
1982                 reg = TRANS_CHICKEN2(pipe);
1983                 val = I915_READ(reg);
1984                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1985                 I915_WRITE(reg, val);
1986         }
1987 
1988         reg = PCH_TRANSCONF(pipe);
1989         val = I915_READ(reg);
1990         pipeconf_val = I915_READ(PIPECONF(pipe));
1991 
1992         if (HAS_PCH_IBX(dev_priv->dev)) {
1993                 /*
1994                  * Make the BPC in transcoder be consistent with
1995                  * that in pipeconf reg. For HDMI we must use 8bpc
1996                  * here for both 8bpc and 12bpc.
1997                  */
1998                 val &= ~PIPECONF_BPC_MASK;
1999                 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
2000                         val |= PIPECONF_8BPC;
2001                 else
2002                         val |= pipeconf_val & PIPECONF_BPC_MASK;
2003         }
2004 
2005         val &= ~TRANS_INTERLACE_MASK;
2006         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
2007                 if (HAS_PCH_IBX(dev_priv->dev) &&
2008                     intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
2009                         val |= TRANS_LEGACY_INTERLACED_ILK;
2010                 else
2011                         val |= TRANS_INTERLACED;
2012         else
2013                 val |= TRANS_PROGRESSIVE;
2014 
2015         I915_WRITE(reg, val | TRANS_ENABLE);
2016         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2017                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2018 }
2019 
/*
 * Enable the single LPT PCH transcoder, fed by @cpu_transcoder. LPT
 * always uses transcoder/pipe A on the PCH side, hence the hard-coded
 * PIPE_A/TRANSCODER_A register accesses.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Propagate interlaced vs. progressive from the CPU transcoder */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
2050 
/*
 * Disable the PCH transcoder for @pipe. FDI and all PCH ports must be
 * off first. On CPT+ the timing-override chicken bit set at enable time
 * is cleared again afterwards.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t reg, val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (!HAS_PCH_IBX(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
2080 
/*
 * Disable the single LPT PCH transcoder and clear the timing-override
 * chicken bit that was set when it was enabled.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
2097 
2098 /**
2099  * intel_enable_pipe - enable a pipe, asserting requirements
2100  * @crtc: crtc responsible for the pipe
2101  *
2102  * Enable @crtc's pipe, making sure that various hardware specific requirements
2103  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2104  */
2105 static void intel_enable_pipe(struct intel_crtc *crtc)
2106 {
2107         struct drm_device *dev = crtc->base.dev;
2108         struct drm_i915_private *dev_priv = dev->dev_private;
2109         enum pipe pipe = crtc->pipe;
2110         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
2111                                                                       pipe);
2112         enum pipe pch_transcoder;
2113         int reg;
2114         u32 val;
2115 
2116         DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2117 
2118         assert_planes_disabled(dev_priv, pipe);
2119         assert_cursor_disabled(dev_priv, pipe);
2120         assert_sprites_disabled(dev_priv, pipe);
2121 
2122         if (HAS_PCH_LPT(dev_priv->dev))
2123                 pch_transcoder = TRANSCODER_A;
2124         else
2125                 pch_transcoder = pipe;
2126 
2127         /*
2128          * A pipe without a PLL won't actually be able to drive bits from
2129          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2130          * need the check.
2131          */
2132         if (HAS_GMCH_DISPLAY(dev_priv->dev))
2133                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2134                         assert_dsi_pll_enabled(dev_priv);
2135                 else
2136                         assert_pll_enabled(dev_priv, pipe);
2137         else {
2138                 if (crtc->config->has_pch_encoder) {
2139                         /* if driving the PCH, we need FDI enabled */
2140                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2141                         assert_fdi_tx_pll_enabled(dev_priv,
2142                                                   (enum pipe) cpu_transcoder);
2143                 }
2144                 /* FIXME: assert CPU port conditions for SNB+ */
2145         }
2146 
2147         reg = PIPECONF(cpu_transcoder);
2148         val = I915_READ(reg);
2149         if (val & PIPECONF_ENABLE) {
2150                 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2151                           (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2152                 return;
2153         }
2154 
2155         I915_WRITE(reg, val | PIPECONF_ENABLE);
2156         POSTING_READ(reg);
2157 }
2158 
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	int reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Nothing to do if the pipe is already off */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for off if we actually cleared the enable bit */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2208 
2209 static bool need_vtd_wa(struct drm_device *dev)
2210 {
2211 #ifdef CONFIG_INTEL_IOMMU
2212         if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2213                 return true;
2214 #endif
2215         return false;
2216 }
2217 
2218 unsigned int
2219 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
2220                   uint64_t fb_format_modifier, unsigned int plane)
2221 {
2222         unsigned int tile_height;
2223         uint32_t pixel_bytes;
2224 
2225         switch (fb_format_modifier) {
2226         case DRM_FORMAT_MOD_NONE:
2227                 tile_height = 1;
2228                 break;
2229         case I915_FORMAT_MOD_X_TILED:
2230                 tile_height = IS_GEN2(dev) ? 16 : 8;
2231                 break;
2232         case I915_FORMAT_MOD_Y_TILED:
2233                 tile_height = 32;
2234                 break;
2235         case I915_FORMAT_MOD_Yf_TILED:
2236                 pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
2237                 switch (pixel_bytes) {
2238                 default:
2239                 case 1:
2240                         tile_height = 64;
2241                         break;
2242                 case 2:
2243                 case 4:
2244                         tile_height = 32;
2245                         break;
2246                 case 8:
2247                         tile_height = 16;
2248                         break;
2249                 case 16:
2250                         WARN_ONCE(1,
2251                                   "128-bit pixels are not supported for display!");
2252                         tile_height = 16;
2253                         break;
2254                 }
2255                 break;
2256         default:
2257                 MISSING_CASE(fb_format_modifier);
2258                 tile_height = 1;
2259                 break;
2260         }
2261 
2262         return tile_height;
2263 }
2264 
2265 unsigned int
2266 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2267                       uint32_t pixel_format, uint64_t fb_format_modifier)
2268 {
2269         return ALIGN(height, intel_tile_height(dev, pixel_format,
2270                                                fb_format_modifier, 0));
2271 }
2272 
/*
 * Fill in the GGTT view for pinning @fb. Defaults to the normal view;
 * switches to the rotated view and computes its page-granular geometry
 * (width/height in pages, total size, plus the UV plane for NV12) when
 * the plane state requests a 90/270 degree rotation.
 */
static int
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct intel_rotation_info *info = &view->rotation_info;
	unsigned int tile_height, tile_pitch;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return 0;

	/* Only 90/270 rotation needs the special rotated GGTT view */
	if (!intel_rotation_90_or_270(plane_state->rotation))
		return 0;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->uv_offset = fb->offsets[1];
	info->fb_modifier = fb->modifier[0];

	/* Geometry of plane 0 in whole pages */
	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
					fb->modifier[0], 0);
	tile_pitch = PAGE_SIZE / tile_height;
	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
	info->size = info->width_pages * info->height_pages * PAGE_SIZE;

	if (info->pixel_format == DRM_FORMAT_NV12) {
		/* NV12's UV plane is half height; size it separately */
		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
						fb->modifier[0], 1);
		tile_pitch = PAGE_SIZE / tile_height;
		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
						     tile_height);
		info->size_uv = info->width_pages_uv * info->height_pages_uv *
				PAGE_SIZE;
	}

	return 0;
}
2316 
2317 static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2318 {
2319         if (INTEL_INFO(dev_priv)->gen >= 9)
2320                 return 256 * 1024;
2321         else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2322                  IS_VALLEYVIEW(dev_priv))
2323                 return 128 * 1024;
2324         else if (INTEL_INFO(dev_priv)->gen >= 4)
2325                 return 4 * 1024;
2326         else
2327                 return 0;
2328 }
2329 
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer object into the GGTT for
 * scanout and (for a normal view) install a fence register.
 *
 * Chooses the GGTT alignment from the fb's tiling modifier, fills the
 * GGTT view from @plane_state, pins the object to the display plane and,
 * for a normal (unrotated) view, grabs and pins a fence.  The whole
 * pin/fence sequence runs with mm.interruptible cleared and under a
 * runtime-PM reference.  Caller must hold dev->struct_mutex.
 *
 * Returns 0 on success or a negative error code (-EDEADLK from the fence
 * path is converted to -EBUSY so atomic doesn't mistake it for a locking
 * recovery request).
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state,
			   struct intel_engine_cs *pipelined,
			   struct drm_i915_gem_request **pipelined_request)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Display engine alignment requirements depend on the tiling mode. */
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		alignment = intel_linear_alignment(dev_priv);
		break;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		/* Y tiling is only valid for display on gen9+. */
		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
			  "Y tiling bo slipped through, driver bug!\n"))
			return -EINVAL;
		alignment = 1 * 1024 * 1024;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
		return -EINVAL;
	}

	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	if (ret)
		return ret;

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
						   pipelined_request, &view);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2432 
2433 static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2434                                const struct drm_plane_state *plane_state)
2435 {
2436         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2437         struct i915_ggtt_view view;
2438         int ret;
2439 
2440         WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2441 
2442         ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
2443         WARN_ONCE(ret, "Couldn't get view from plane state!");
2444 
2445         if (view.type == I915_GGTT_VIEW_NORMAL)
2446                 i915_gem_object_unpin_fence(obj);
2447 
2448         i915_gem_object_unpin_from_display_plane(obj, &view);
2449 }
2450 
2451 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2452  * is assumed to be a power-of-two. */
2453 unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
2454                                              int *x, int *y,
2455                                              unsigned int tiling_mode,
2456                                              unsigned int cpp,
2457                                              unsigned int pitch)
2458 {
2459         if (tiling_mode != I915_TILING_NONE) {
2460                 unsigned int tile_rows, tiles;
2461 
2462                 tile_rows = *y / 8;
2463                 *y %= 8;
2464 
2465                 tiles = *x / (512/cpp);
2466                 *x %= 512/cpp;
2467 
2468                 return tile_rows * pitch * 8 + tiles * 4096;
2469         } else {
2470                 unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
2471                 unsigned int offset;
2472 
2473                 offset = *y * pitch + *x * cpp;
2474                 *y = (offset & alignment) / pitch;
2475                 *x = ((offset & alignment) - *y * pitch) / cpp;
2476                 return offset & ~alignment;
2477         }
2478 }
2479 
2480 static int i9xx_format_to_fourcc(int format)
2481 {
2482         switch (format) {
2483         case DISPPLANE_8BPP:
2484                 return DRM_FORMAT_C8;
2485         case DISPPLANE_BGRX555:
2486                 return DRM_FORMAT_XRGB1555;
2487         case DISPPLANE_BGRX565:
2488                 return DRM_FORMAT_RGB565;
2489         default:
2490         case DISPPLANE_BGRX888:
2491                 return DRM_FORMAT_XRGB8888;
2492         case DISPPLANE_RGBX888:
2493                 return DRM_FORMAT_XBGR8888;
2494         case DISPPLANE_BGRX101010:
2495                 return DRM_FORMAT_XRGB2101010;
2496         case DISPPLANE_RGBX101010:
2497                 return DRM_FORMAT_XBGR2101010;
2498         }
2499 }
2500 
2501 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2502 {
2503         switch (format) {
2504         case PLANE_CTL_FORMAT_RGB_565:
2505                 return DRM_FORMAT_RGB565;
2506         default:
2507         case PLANE_CTL_FORMAT_XRGB_8888:
2508                 if (rgb_order) {
2509                         if (alpha)
2510                                 return DRM_FORMAT_ABGR8888;
2511                         else
2512                                 return DRM_FORMAT_XBGR8888;
2513                 } else {
2514                         if (alpha)
2515                                 return DRM_FORMAT_ARGB8888;
2516                         else
2517                                 return DRM_FORMAT_XRGB8888;
2518                 }
2519         case PLANE_CTL_FORMAT_XRGB_2101010:
2520                 if (rgb_order)
2521                         return DRM_FORMAT_XBGR2101010;
2522                 else
2523                         return DRM_FORMAT_XRGB2101010;
2524         }
2525 }
2526 
/*
 * Try to wrap the framebuffer the BIOS left enabled (described by
 * @plane_config) in a GEM object backed by the preallocated stolen
 * memory it already lives in, so the boot image survives driver load.
 *
 * Returns true if the fb was reconstructed, false if the stolen region
 * is unusable (too big, already claimed, or the fb init failed).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Expand [base, base+size) to whole pages for the stolen allocator. */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	/* GTT offset == stolen offset here, hence base_aligned twice. */
	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj)
		return false;

	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	/* Re-describe the BIOS fb in addfb2 terms for intel_framebuffer_init. */
	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	mutex_lock(&dev->struct_mutex);
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2585 
2586 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2587 static void
2588 update_state_fb(struct drm_plane *plane)
2589 {
2590         if (plane->fb == plane->state->fb)
2591                 return;
2592 
2593         if (plane->state->fb)
2594                 drm_framebuffer_unreference(plane->state->fb);
2595         plane->state->fb = plane->fb;
2596         if (plane->state->fb)
2597                 drm_framebuffer_reference(plane->state->fb);
2598 }
2599 
/*
 * Take over the framebuffer the BIOS enabled on this CRTC: either wrap
 * its stolen memory in a fresh GEM object, or share an fb another CRTC
 * already reconstructed at the same GGTT address.  If neither works the
 * primary plane is disabled so later state checks don't trip over a
 * visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* Alloc failed; the intel_framebuffer wrapper is ours to free. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Same GGTT base means both pipes scan out the same surface. */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Wire the fb into the primary plane's (pre-atomic-boot) state. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	obj = intel_fb_obj(fb);
	/* Keep the BIOS-programmed swizzle so the image stays readable. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2684 
/*
 * Program the primary plane registers (DSPCNTR and friends) for gen2-4
 * style hardware: pixel format, tiling, 180° rotation and the surface
 * address/offsets derived from the fb and the (x, y) scanout origin.
 * An invisible plane or a NULL fb disables the plane instead.
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!visible || !fb) {
		/* Disable the plane; gen4+ arms via DSPSURF, older via DSPADDR. */
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV pipe B primary plane has its own size/pos registers. */
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Format was validated earlier; anything else is a driver bug. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Split into a page-aligned base plus intra-tile x/y. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(dev_priv,
						       &x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF write arms the update on gen4+. */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2814 
/*
 * Program the primary plane for ILK-BDW style hardware.  Same contract
 * as i9xx_update_primary_plane(): an invisible plane or NULL fb disables
 * the plane, otherwise format/tiling/rotation and surface offsets are
 * written and the update is armed via DSPSURF.
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Format was validated earlier; anything else is a driver bug. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	/* Split into a page-aligned base plus intra-tile x/y. */
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(dev_priv,
					       &x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate from the programmed offset directly. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	/* DSPSURF write arms the update. */
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2918 
2919 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2920                               uint32_t pixel_format)
2921 {
2922         u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2923 
2924         /*
2925          * The stride is either expressed as a multiple of 64 bytes
2926          * chunks for linear buffers or in number of tiles for tiled
2927          * buffers.
2928          */
2929         switch (fb_modifier) {
2930         case DRM_FORMAT_MOD_NONE:
2931                 return 64;
2932         case I915_FORMAT_MOD_X_TILED:
2933                 if (INTEL_INFO(dev)->gen == 2)
2934                         return 128;
2935                 return 512;
2936         case I915_FORMAT_MOD_Y_TILED:
2937                 /* No need to check for old gens and Y tiling since this is
2938                  * about the display engine and those will be blocked before
2939                  * we get here.
2940                  */
2941                 return 128;
2942         case I915_FORMAT_MOD_Yf_TILED:
2943                 if (bits_per_pixel == 8)
2944                         return 64;
2945                 else
2946                         return 128;
2947         default:
2948                 MISSING_CASE(fb_modifier);
2949                 return 64;
2950         }
2951 }
2952 
2953 unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
2954                                      struct drm_i915_gem_object *obj,
2955                                      unsigned int plane)
2956 {
2957         const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
2958         struct i915_vma *vma;
2959         unsigned char *offset;
2960 
2961         if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
2962                 view = &i915_ggtt_view_rotated;
2963 
2964         vma = i915_gem_obj_to_ggtt_view(obj, view);
2965         if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2966                 view->type))
2967                 return -1;
2968 
2969         offset = (unsigned char *)vma->node.start;
2970 
2971         if (plane == 1) {
2972                 offset += vma->ggtt_view.rotation_info.uv_start_page *
2973                           PAGE_SIZE;
2974         }
2975 
2976         return (unsigned long)offset;
2977 }
2978 
2979 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2980 {
2981         struct drm_device *dev = intel_crtc->base.dev;
2982         struct drm_i915_private *dev_priv = dev->dev_private;
2983 
2984         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2985         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2986         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2987 }
2988 
2989 /*
2990  * This function detaches (aka. unbinds) unused scalers in hardware
2991  */
2992 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2993 {
2994         struct intel_crtc_scaler_state *scaler_state;
2995         int i;
2996 
2997         scaler_state = &intel_crtc->config->scaler_state;
2998 
2999         /* loop through and disable scalers that aren't in use */
3000         for (i = 0; i < intel_crtc->num_scalers; i++) {
3001                 if (!scaler_state->scalers[i].in_use)
3002                         skl_detach_scaler(intel_crtc, i);
3003         }
3004 }
3005 
/*
 * Map a DRM fourcc to the SKL PLANE_CTL format (and channel-order/alpha)
 * bits.  Unknown formats hit MISSING_CASE and fall through to return 0,
 * i.e. no format bits set.
 */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3046 
3047 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3048 {
3049         switch (fb_modifier) {
3050         case DRM_FORMAT_MOD_NONE:
3051                 break;
3052         case I915_FORMAT_MOD_X_TILED:
3053                 return PLANE_CTL_TILED_X;
3054         case I915_FORMAT_MOD_Y_TILED:
3055                 return PLANE_CTL_TILED_Y;
3056         case I915_FORMAT_MOD_Yf_TILED:
3057                 return PLANE_CTL_TILED_YF;
3058         default:
3059                 MISSING_CASE(fb_modifier);
3060         }
3061 
3062         return 0;
3063 }
3064 
3065 u32 skl_plane_ctl_rotation(unsigned int rotation)
3066 {
3067         switch (rotation) {
3068         case BIT(DRM_ROTATE_0):
3069                 break;
3070         /*
3071          * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3072          * while i915 HW rotation is clockwise, thats why this swapping.
3073          */
3074         case BIT(DRM_ROTATE_90):
3075                 return PLANE_CTL_ROTATE_270;
3076         case BIT(DRM_ROTATE_180):
3077                 return PLANE_CTL_ROTATE_180;
3078         case BIT(DRM_ROTATE_270):
3079                 return PLANE_CTL_ROTATE_90;
3080         default:
3081                 MISSING_CASE(rotation);
3082         }
3083 
3084         return 0;
3085 }
3086 
/*
 * Program the SKL+ primary plane (plane index 0) of @crtc to scan out @fb.
 *
 * @crtc: CRTC whose primary plane is updated
 * @fb:   framebuffer to display; if NULL, or the plane state says the
 *        plane is not visible, the plane is disabled instead
 * @x, @y: offset into the framebuffer; expected to match the plane
 *         state's src origin (WARNed on below)
 *
 * Writes PLANE_CTL/OFFSET/SIZE/STRIDE, optionally attaches a pipe
 * scaler, and finally writes PLANE_SURF, which latches the whole
 * register set (so it must come last).
 */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = crtc->primary;
	bool visible = to_intel_plane_state(plane->state)->visible;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation;
	int x_offset, y_offset;
	unsigned long surf_addr;
	struct intel_crtc_state *crtc_state = intel_crtc->config;
	struct intel_plane_state *plane_state;
	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
	int scaler_id = -1;

	plane_state = to_intel_plane_state(plane->state);

	/* Plane off: clear control and surface, then flush with a read. */
	if (!visible || !fb) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	/* Translate format/tiling/rotation into PLANE_CTL bits. */
	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

	rotation = plane->state->rotation;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	obj = intel_fb_obj(fb);
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	/* src coordinates are 16.16 fixed point; dst are integer pixels. */
	scaler_id = plane_state->scaler_id;
	src_x = plane_state->src.x1 >> 16;
	src_y = plane_state->src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->src) >> 16;
	src_h = drm_rect_height(&plane_state->src) >> 16;
	dst_x = plane_state->dst.x1;
	dst_y = plane_state->dst.y1;
	dst_w = drm_rect_width(&plane_state->dst);
	dst_h = drm_rect_height(&plane_state->dst);

	/* The explicit x/y arguments must agree with the plane state. */
	WARN_ON(x != src_x || y != src_y);

	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev, fb->pixel_format,
						fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* Rotated view: x/y swap roles, and x is measured from
		 * the opposite edge of the (tile-aligned) surface. */
		x_offset = stride * tile_height - y - src_h;
		y_offset = x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = x;
		y_offset = y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		/* Route this plane through the assigned pipe scaler; the
		 * plane itself is positioned at 0,0 and the scaler window
		 * provides the final position/size. */
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* PLANE_SURF write arms the update; keep it last. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3191 
3192 /* Assume fb object is pinned & idle & fenced and just update base pointers */
3193 static int
3194 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3195                            int x, int y, enum mode_set_atomic state)
3196 {
3197         struct drm_device *dev = crtc->dev;
3198         struct drm_i915_private *dev_priv = dev->dev_private;
3199 
3200         if (dev_priv->fbc.disable_fbc)
3201                 dev_priv->fbc.disable_fbc(dev_priv);
3202 
3203         dev_priv->display.update_primary_plane(crtc, fb, x, y);
3204 
3205         return 0;
3206 }
3207 
3208 static void intel_complete_page_flips(struct drm_device *dev)
3209 {
3210         struct drm_crtc *crtc;
3211 
3212         for_each_crtc(dev, crtc) {
3213                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3214                 enum plane plane = intel_crtc->plane;
3215 
3216                 intel_prepare_page_flip(dev, plane);
3217                 intel_finish_page_flip_plane(dev, plane);
3218         }
3219 }
3220 
3221 static void intel_update_primary_planes(struct drm_device *dev)
3222 {
3223         struct drm_crtc *crtc;
3224 
3225         for_each_crtc(dev, crtc) {
3226                 struct intel_plane *plane = to_intel_plane(crtc->primary);
3227                 struct intel_plane_state *plane_state;
3228 
3229                 drm_modeset_lock_crtc(crtc, &plane->base);
3230 
3231                 plane_state = to_intel_plane_state(plane->base.state);
3232 
3233                 if (plane_state->base.fb)
3234                         plane->commit_plane(&plane->base, plane_state);
3235 
3236                 drm_modeset_unlock_crtc(crtc);
3237         }
3238 }
3239 
3240 void intel_prepare_reset(struct drm_device *dev)
3241 {
3242         /* no reset support for gen2 */
3243         if (IS_GEN2(dev))
3244                 return;
3245 
3246         /* reset doesn't touch the display */
3247         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3248                 return;
3249 
3250         drm_modeset_lock_all(dev);
3251         /*
3252          * Disabling the crtcs gracefully seems nicer. Also the
3253          * g33 docs say we should at least disable all the planes.
3254          */
3255         intel_display_suspend(dev);
3256 }
3257 
/*
 * Bring the display back after a GPU reset; counterpart of
 * intel_prepare_reset(). On platforms where intel_prepare_reset() took
 * the modeset locks (pre-gen5, non-g4x), this function drops them; on
 * the other platforms it returns before the unlock, matching the early
 * returns in intel_prepare_reset().
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	/* hpd_irq_setup is called under irq_lock elsewhere too; keep that. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_hpd_init(dev_priv);

	/* Pairs with drm_modeset_lock_all() in intel_prepare_reset(). */
	drm_modeset_unlock_all(dev);
}
3308 
3309 static void
3310 intel_finish_fb(struct drm_framebuffer *old_fb)
3311 {
3312         struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
3313         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3314         bool was_interruptible = dev_priv->mm.interruptible;
3315         int ret;
3316 
3317         /* Big Hammer, we also need to ensure that any pending
3318          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
3319          * current scanout is retired before unpinning the old
3320          * framebuffer. Note that we rely on userspace rendering
3321          * into the buffer attached to the pipe they are waiting
3322          * on. If not, userspace generates a GPU hang with IPEHR
3323          * point to the MI_WAIT_FOR_EVENT.
3324          *
3325          * This should only fail upon a hung GPU, in which case we
3326          * can safely continue.
3327          */
3328         dev_priv->mm.interruptible = false;
3329         ret = i915_gem_object_wait_rendering(obj, true);
3330         dev_priv->mm.interruptible = was_interruptible;
3331 
3332         WARN_ON(ret);
3333 }
3334 
3335 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3336 {
3337         struct drm_device *dev = crtc->dev;
3338         struct drm_i915_private *dev_priv = dev->dev_private;
3339         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3340         bool pending;
3341 
3342         if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3343             intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3344                 return false;
3345 
3346         spin_lock_irq(&dev->event_lock);
3347         pending = to_intel_crtc(crtc)->unpin_work != NULL;
3348         spin_unlock_irq(&dev->event_lock);
3349 
3350         return pending;
3351 }
3352 
/*
 * Flush an updated pipe source size and panel-fitter configuration to
 * the hardware for a fastboot-style update (flip instead of full
 * modeset).
 *
 * @crtc: CRTC being updated
 * @old_crtc_state: previous state, used to detect a panel fitter that
 *                  must now be disabled
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (HAS_DDI(dev))
		intel_set_pipe_csc(&crtc->base);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	/* PIPESRC encodes (width - 1) << 16 | (height - 1). */
	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3397 
/*
 * Switch the FDI link of @crtc from a training pattern to normal pixel
 * traffic once link training has completed: select "no training" plus
 * enhanced framing on the CPU TX side, the normal pattern on the PCH RX
 * side, then wait one idle-pattern time before enabling IVB error
 * correction.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* Clear the training-pattern field, then select "none"
		 * (normal operation) with enhanced framing. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT uses its own pattern field layout. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3438 
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU-to-PCH FDI link for @crtc in two phases: pattern 1
 * until bit lock is reported in FDI_RX_IIR, then pattern 2 until symbol
 * lock. Each phase polls the IIR up to 5 times; failure is only logged
 * (DRM_ERROR), not propagated.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock (training pattern 1 result). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Ack the status bit back into the IIR. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock (training pattern 2 result). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3531 
/*
 * SNB B-step FDI TX voltage-swing / pre-emphasis settings, tried in
 * this order by the link-training loops below until lock is achieved.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3538 
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link for @crtc on SNB: like the ILK version, but each
 * training phase additionally steps through the four vswing/emphasis
 * levels in snb_b_fdi_train_param[], retrying the IIR poll up to 5
 * times per level. Failure is only logged, not propagated.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT PCH has its own pattern-field layout. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 1: try each drive level until bit lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Ack the sticky status bit. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		/* retry < 5 means the inner loop broke out on lock. */
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Phase 2: try each drive level until symbol lock is seen. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3670 
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Train the FDI link on IVB A0 by hand: for each vswing/pre-emphasis
 * level (each tried twice), disable TX/RX, re-enable with training
 * pattern 1 and poll for bit lock, then switch to pattern 2 and poll
 * for symbol lock; jump to train_done on full success, otherwise move
 * to the next level. Failure at the end is only logged.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each level is attempted twice in a row. */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll (4 tries) for bit lock; double-read the IIR. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				/* Ack the status bit. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll (4 tries) for symbol lock; double-read the IIR. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3789 
/*
 * Enable the FDI PLLs for @intel_crtc: first the PCH FDI RX PLL (with
 * lane count and bpc configured), then switch the RX clock from Rawclk
 * to PCDclk, and finally make sure the CPU FDI TX PLL is running. Each
 * step is flushed with a posting read and followed by the documented
 * warm-up delay.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* 0x7 << 16 is the FDI RX bpc field; its value is mirrored from
	 * PIPECONF's bpc bits below. */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3826 
/*
 * Disable the FDI PLLs for @intel_crtc, reversing
 * ironlake_fdi_pll_enable(): switch the RX clock back to Rawclk, turn
 * off the CPU FDI TX PLL, then the PCH FDI RX PLL, with posting reads
 * and settle delays between the steps.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3855 
/*
 * Disable the FDI link for @crtc: turn off CPU FDI TX and PCH FDI RX,
 * apply the IBX clock-pointer workaround, and leave the (disabled) link
 * parked in training pattern 1 with the bpc field kept consistent with
 * PIPECONF. The PLLs are left to ironlake_fdi_pll_disable().
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* 0x7 << 16 is the FDI RX bpc field, refreshed from PIPECONF. */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3907 
3908 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3909 {
3910         struct intel_crtc *crtc;
3911 
3912         /* Note that we don't need to be called with mode_config.lock here
3913          * as our list of CRTC objects is static for the lifetime of the
3914          * device and so cannot disappear as we iterate. Similarly, we can
3915          * happily treat the predicates as racy, atomic checks as userspace
3916          * cannot claim and pin a new fb without at least acquring the
3917          * struct_mutex and so serialising with us.
3918          */
3919         for_each_intel_crtc(dev, crtc) {
3920                 if (atomic_read(&crtc->unpin_work_count) == 0)
3921                         continue;
3922 
3923                 if (crtc->unpin_work)
3924                         intel_wait_for_vblank(dev, crtc->pipe);
3925 
3926                 return true;
3927         }
3928 
3929         return false;
3930 }
3931 
/* Retire the pending page flip on @intel_crtc: clear ->unpin_work, deliver
 * the userspace vblank event (if one was requested) and hand the unpin work
 * off to the driver workqueue. Called with dev->event_lock held (see
 * intel_crtc_wait_for_pending_flips()).
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	/* Send the completion event userspace asked for, if any. */
	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	/* Drop the vblank reference taken when the flip was queued. */
	drm_crtc_vblank_put(&intel_crtc->base);

	/* Wake waiters in intel_crtc_wait_for_pending_flips() and defer the
	 * actual unpin/cleanup to the driver workqueue. */
	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
3954 
/* Wait (up to 60 seconds) for any pending page flip on @crtc to complete,
 * then wait for outstanding rendering on the current framebuffer. If the
 * flip never signals, assume it is stuck and retire it manually so the
 * pipe can still be modeset/disabled.
 */
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* NOTE(review): warns if someone is already waiting - this path
	 * appears to expect to be the only waiter here; confirm. */
	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
				       !intel_crtc_has_pending_flip(crtc),
				       60*HZ) == 0)) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		/* Timed out: forcibly complete the stuck flip under the
		 * event lock so we cannot race the flip IRQ path. */
		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	/* Finally wait for the GPU to be done with the framebuffer. */
	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_finish_fb(crtc->primary->fb);
		mutex_unlock(&dev->struct_mutex);
	}
}
3980 
/* Program iCLKIP clock to the desired frequency.
 *
 * Computes the divisor/phase-increment settings for the requested pixel
 * clock and writes them through the sideband interface (SBI). The sequence
 * (gate pixclk, disable SSC, program divisors, re-enable, ungate) is
 * order-sensitive - do not reorder the register accesses.
 */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Target pixel clock in kHz. */
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* All sideband accesses must be serialised under sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->sb_lock);
}
4069 
/* Copy the CPU transcoder's H/V timing registers verbatim to the PCH
 * transcoder, so both sides of the FDI link run identical timings. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Horizontal timings. */
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	/* Vertical timings, including the interlace vsync shift. */
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4093 
4094 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4095 {
4096         struct drm_i915_private *dev_priv = dev->dev_private;
4097         uint32_t temp;
4098 
4099         temp = I915_READ(SOUTH_CHICKEN1);
4100         if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4101                 return;
4102 
4103         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4104         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4105 
4106         temp &= ~FDI_BC_BIFURCATION_SELECT;
4107         if (enable)
4108                 temp |= FDI_BC_BIFURCATION_SELECT;
4109 
4110         DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4111         I915_WRITE(SOUTH_CHICKEN1, temp);
4112         POSTING_READ(SOUTH_CHICKEN1);
4113 }
4114 
4115 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4116 {
4117         struct drm_device *dev = intel_crtc->base.dev;
4118 
4119         switch (intel_crtc->pipe) {
4120         case PIPE_A:
4121                 break;
4122         case PIPE_B:
4123                 if (intel_crtc->config->fdi_lanes > 2)
4124                         cpt_set_fdi_bc_bifurcation(dev, false);
4125                 else
4126                         cpt_set_fdi_bc_bifurcation(dev, true);
4127 
4128                 break;
4129         case PIPE_C:
4130                 cpt_set_fdi_bc_bifurcation(dev, true);
4131 
4132                 break;
4133         default:
4134                 BUG();
4135         }
4136 }
4137 
4138 /*
4139  * Enable PCH resources required for PCH ports:
4140  *   - PCH PLLs
4141  *   - FDI training & RX/TX
4142  *   - update transcoder timings
4143  *   - DP transcoding bits
4144  *   - transcoder
4145  */
4146 static void ironlake_pch_enable(struct drm_crtc *crtc)
4147 {
4148         struct drm_device *dev = crtc->dev;
4149         struct drm_i915_private *dev_priv = dev->dev_private;
4150         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4151         int pipe = intel_crtc->pipe;
4152         u32 reg, temp;
4153 
4154         assert_pch_transcoder_disabled(dev_priv, pipe);
4155 
4156         if (IS_IVYBRIDGE(dev))
4157                 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4158 
4159         /* Write the TU size bits before fdi link training, so that error
4160          * detection works. */
4161         I915_WRITE(FDI_RX_TUSIZE1(pipe),
4162                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4163 
4164         /* For PCH output, training FDI link */
4165         dev_priv->display.fdi_link_train(crtc);
4166 
4167         /* We need to program the right clock selection before writing the pixel
4168          * mutliplier into the DPLL. */
4169         if (HAS_PCH_CPT(dev)) {
4170                 u32 sel;
4171 
4172                 temp = I915_READ(PCH_DPLL_SEL);
4173                 temp |= TRANS_DPLL_ENABLE(pipe);
4174                 sel = TRANS_DPLLB_SEL(pipe);
4175                 if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
4176                         temp |= sel;
4177                 else
4178                         temp &= ~sel;
4179                 I915_WRITE(PCH_DPLL_SEL, temp);
4180         }
4181 
4182         /* XXX: pch pll's can be enabled any time before we enable the PCH
4183          * transcoder, and we actually should do this to not upset any PCH
4184          * transcoder that already use the clock when we share it.
4185          *
4186          * Note that enable_shared_dpll tries to do the right thing, but
4187          * get_shared_dpll unconditionally resets the pll - we need that to have
4188          * the right LVDS enable sequence. */
4189         intel_enable_shared_dpll(intel_crtc);
4190 
4191         /* set transcoder timing, panel must allow it */
4192         assert_panel_unlocked(dev_priv, pipe);
4193         ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4194 
4195         intel_fdi_normal_train(crtc);
4196 
4197         /* For PCH DP, enable TRANS_DP_CTL */
4198         if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4199                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4200                 reg = TRANS_DP_CTL(pipe);
4201                 temp = I915_READ(reg);
4202                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4203                           TRANS_DP_SYNC_MASK |
4204                           TRANS_DP_BPC_MASK);
4205                 temp |= TRANS_DP_OUTPUT_ENABLE;
4206                 temp |= bpc << 9; /* same format but at 11:9 */
4207 
4208                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
4209                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4210                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
4211                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4212 
4213                 switch (intel_trans_dp_port_sel(crtc)) {
4214                 case PCH_DP_B:
4215                         temp |= TRANS_DP_PORT_SEL_B;
4216                         break;
4217                 case PCH_DP_C:
4218                         temp |= TRANS_DP_PORT_SEL_C;
4219                         break;
4220                 case PCH_DP_D:
4221                         temp |= TRANS_DP_PORT_SEL_D;
4222                         break;
4223                 default:
4224                         BUG();
4225                 }
4226 
4227                 I915_WRITE(reg, temp);
4228         }
4229 
4230         ironlake_enable_pch_transcoder(dev_priv, pipe);
4231 }
4232 
/* LPT variant of the PCH enable sequence: program iCLKIP, copy the CPU
 * transcoder timings to the (single) PCH transcoder A, then enable it.
 * No FDI training is involved here, unlike ironlake_pch_enable(). */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* LPT has only one PCH transcoder (A). */
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4249 
4250 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4251                                                 struct intel_crtc_state *crtc_state)
4252 {
4253         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4254         struct intel_shared_dpll *pll;
4255         struct intel_shared_dpll_config *shared_dpll;
4256         enum intel_dpll_id i;
4257         int max = dev_priv->num_shared_dpll;
4258 
4259         shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4260 
4261         if (HAS_PCH_IBX(dev_priv->dev)) {
4262                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4263                 i = (enum intel_dpll_id) crtc->pipe;
4264                 pll = &dev_priv->shared_dplls[i];
4265 
4266                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4267                               crtc->base.base.id, pll->name);
4268 
4269                 WARN_ON(shared_dpll[i].crtc_mask);
4270 
4271                 goto found;
4272         }
4273 
4274         if (IS_BROXTON(dev_priv->dev)) {
4275                 /* PLL is attached to port in bxt */
4276                 struct intel_encoder *encoder;
4277                 struct intel_digital_port *intel_dig_port;
4278 
4279                 encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4280                 if (WARN_ON(!encoder))
4281                         return NULL;
4282 
4283                 intel_dig_port = enc_to_dig_port(&encoder->base);
4284                 /* 1:1 mapping between ports and PLLs */
4285                 i = (enum intel_dpll_id)intel_dig_port->port;
4286                 pll = &dev_priv->shared_dplls[i];
4287                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4288                         crtc->base.base.id, pll->name);
4289                 WARN_ON(shared_dpll[i].crtc_mask);
4290 
4291                 goto found;
4292         } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4293                 /* Do not consider SPLL */
4294                 max = 2;
4295 
4296         for (i = 0; i < max; i++) {
4297                 pll = &dev_priv->shared_dplls[i];
4298 
4299                 /* Only want to check enabled timings first */
4300                 if (shared_dpll[i].crtc_mask == 0)
4301                         continue;
4302 
4303                 if (memcmp(&crtc_state->dpll_hw_state,
4304                            &shared_dpll[i].hw_state,
4305                            sizeof(crtc_state->dpll_hw_state)) == 0) {
4306                         DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4307                                       crtc->base.base.id, pll->name,
4308                                       shared_dpll[i].crtc_mask,
4309                                       pll->active);
4310                         goto found;
4311                 }
4312         }
4313 
4314         /* Ok no matching timings, maybe there's a free one? */
4315         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4316                 pll = &dev_priv->shared_dplls[i];
4317                 if (shared_dpll[i].crtc_mask == 0) {
4318                         DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4319                                       crtc->base.base.id, pll->name);
4320                         goto found;
4321                 }
4322         }
4323 
4324         return NULL;
4325 
4326 found:
4327         if (shared_dpll[i].crtc_mask == 0)
4328                 shared_dpll[i].hw_state =
4329                         crtc_state->dpll_hw_state;
4330 
4331         crtc_state->shared_dpll = i;
4332         DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4333                          pipe_name(crtc->pipe));
4334 
4335         shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
4336 
4337         return pll;
4338 }
4339 
4340 static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4341 {
4342         struct drm_i915_private *dev_priv = to_i915(state->dev);
4343         struct intel_shared_dpll_config *shared_dpll;
4344         struct intel_shared_dpll *pll;
4345         enum intel_dpll_id i;
4346 
4347         if (!to_intel_atomic_state(state)->dpll_set)
4348                 return;
4349 
4350         shared_dpll = to_intel_atomic_state(state)->shared_dpll;
4351         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4352                 pll = &dev_priv->shared_dplls[i];
4353                 pll->config = shared_dpll[i];
4354         }
4355 }
4356 
/* CPT post-modeset sanity check: verify the pipe is actually scanning out
 * by watching the scanline counter (PIPEDSL) advance. Retries once before
 * declaring the pipe stuck. */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Second chance before complaining - the counter may have
		 * been sampled at an unlucky moment. */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4370 
/* Stage a scaler state update for one scaler user (the crtc panel fitter
 * or a plane) in @crtc_state.
 *
 * @force_detach: free the scaler regardless of scaling need
 * @scaler_user: bit index identifying the user in scaler_users
 * @scaler_id: in/out; currently assigned scaler, -1 when none
 * @rotation, @src_w/h, @dst_w/h: determine whether scaling is needed
 *
 * Returns 0 on success, -EINVAL if the requested size is outside the
 * scaler's supported range. Only the staged state is touched here; the
 * actual registers are written during plane/panel-fitter programming.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* With a 90/270 rotation the source is rotated before scaling, so
	 * compare src dimensions against the swapped dst dimensions. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4431 
4432 /**
4433  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4434  *
4435  * @state: crtc's scaler state
4436  *
4437  * Return
4438  *     0 - scaler_usage updated successfully
4439  *    error - requested scaling cannot be supported or other error condition
4440  */
4441 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4442 {
4443         struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4444         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4445 
4446         DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4447                       intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4448 
4449         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4450                 &state->scaler_state.scaler_id, DRM_ROTATE_0,
4451                 state->pipe_src_w, state->pipe_src_h,
4452                 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4453 }
4454 
4455 /**
4456  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4457  *
4458  * @state: crtc's scaler state
4459  * @plane_state: atomic plane state to update
4460  *
4461  * Return
4462  *     0 - scaler_usage updated successfully
4463  *    error - requested scaling cannot be supported or other error condition
4464  */
4465 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4466                                    struct intel_plane_state *plane_state)
4467 {
4468 
4469         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4470         struct intel_plane *intel_plane =
4471                 to_intel_plane(plane_state->base.plane);
4472         struct drm_framebuffer *fb = plane_state->base.fb;
4473         int ret;
4474 
4475         bool force_detach = !fb || !plane_state->visible;
4476 
4477         DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4478                       intel_plane->base.base.id, intel_crtc->pipe,
4479                       drm_plane_index(&intel_plane->base));
4480 
4481         ret = skl_update_scaler(crtc_state, force_detach,
4482                                 drm_plane_index(&intel_plane->base),
4483                                 &plane_state->scaler_id,
4484                                 plane_state->base.rotation,
4485                                 drm_rect_width(&plane_state->src) >> 16,
4486                                 drm_rect_height(&plane_state->src) >> 16,
4487                                 drm_rect_width(&plane_state->dst),
4488                                 drm_rect_height(&plane_state->dst));
4489 
4490         if (ret || plane_state->scaler_id < 0)
4491                 return ret;
4492 
4493         /* check colorkey */
4494         if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4495                 DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4496                               intel_plane->base.base.id);
4497                 return -EINVAL;
4498         }
4499 
4500         /* Check src format */
4501         switch (fb->pixel_format) {
4502         case DRM_FORMAT_RGB565:
4503         case DRM_FORMAT_XBGR8888:
4504         case DRM_FORMAT_XRGB8888:
4505         case DRM_FORMAT_ABGR8888:
4506         case DRM_FORMAT_ARGB8888:
4507         case DRM_FORMAT_XRGB2101010:
4508         case DRM_FORMAT_XBGR2101010:
4509         case DRM_FORMAT_YUYV:
4510         case DRM_FORMAT_YVYU:
4511         case DRM_FORMAT_UYVY:
4512         case DRM_FORMAT_VYUY:
4513                 break;
4514         default:
4515                 DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4516                         intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4517                 return -EINVAL;
4518         }
4519 
4520         return 0;
4521 }
4522 
4523 static void skylake_scaler_disable(struct intel_crtc *crtc)
4524 {
4525         int i;
4526 
4527         for (i = 0; i < crtc->num_scalers; i++)
4528                 skl_detach_scaler(crtc, i);
4529 }
4530 
/* Program the SKL panel fitter using the scaler previously reserved in
 * the crtc's scaler state. No-op unless pch_pfit is enabled in config. */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* A scaler must have been claimed during atomic check. */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4558 
/* Program the ILK-style panel fitter (PF) for @crtc, if enabled in the
 * crtc configuration. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			/* IVB/HSW additionally select the pipe in PF_CTL. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4579 
/* Enable Intermediate Pixel Storage (IPS) on @crtc, if configured. On BDW
 * this goes through the pcode mailbox; on HSW via the IPS_CTL register. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		/* rps.hw_lock serialises pcode mailbox access. */
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4612 
/* Disable IPS on @crtc (BDW: pcode mailbox; HSW: IPS_CTL register) and
 * wait for the required vblank before the plane may be disabled. */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		/* rps.hw_lock serialises pcode mailbox access. */
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4637 
/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	/* On GMCH platforms the relevant (DSI or pipe) PLL must be running. */
	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 * IPS is re-enabled after the LUT has been written.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write all 256 entries as packed R(23:16)/G(15:8)/B(7:0) words. */
	for (i = 0; i < 256; i++) {
		u32 palreg;

		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4686 
4687 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4688 {
4689         if (intel_crtc->overlay) {
4690                 struct drm_device *dev = intel_crtc->base.dev;
4691                 struct drm_i915_private *dev_priv = dev->dev_private;
4692 
4693                 mutex_lock(&dev->struct_mutex);
4694                 dev_priv->mm.interruptible = false;
4695                 (void) intel_overlay_switch_off(intel_crtc->overlay);
4696                 dev_priv->mm.interruptible = true;
4697                 mutex_unlock(&dev->struct_mutex);
4698         }
4699 
4700         /* Let userspace switch the overlay on again. In most cases userspace
4701          * has to recompute where to put it anyway.
4702          */
4703 }
4704 
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, pipe);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't raise interrupts, so check manually. */
	if (HAS_GMCH_DISPLAY(dev))
		i9xx_check_fifo_underruns(dev_priv);
}
4753 
/**
 * intel_pre_disable_primary - Perform operations before disabling primary plane
 * @crtc: the CRTC whose primary plane is to be disabled
 *
 * Performs potentially sleeping operations that must be done before the
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * disable that is caused when a sprite plane completely hides the primary
 * plane.
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
4804 
4805 static void intel_post_plane_update(struct intel_crtc *crtc)
4806 {
4807         struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4808         struct drm_device *dev = crtc->base.dev;
4809         struct drm_i915_private *dev_priv = dev->dev_private;
4810         struct drm_plane *plane;
4811 
4812         if (atomic->wait_vblank)
4813                 intel_wait_for_vblank(dev, crtc->pipe);
4814 
4815         intel_frontbuffer_flip(dev, atomic->fb_bits);
4816 
4817         if (atomic->disable_cxsr)
4818                 crtc->wm.cxsr_allowed = true;
4819 
4820         if (crtc->atomic.update_wm_post)
4821                 intel_update_watermarks(&crtc->base);
4822 
4823         if (atomic->update_fbc)
4824                 intel_fbc_update(dev_priv);
4825 
4826         if (atomic->post_enable_primary)
4827                 intel_post_enable_primary(&crtc->base);
4828 
4829         drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
4830                 intel_update_sprite_watermarks(plane, &crtc->base,
4831                                                0, 0, 0, false, false);
4832 
4833         memset(atomic, 0, sizeof(*atomic));
4834 }
4835 
4836 static void intel_pre_plane_update(struct intel_crtc *crtc)
4837 {
4838         struct drm_device *dev = crtc->base.dev;
4839         struct drm_i915_private *dev_priv = dev->dev_private;
4840         struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4841         struct drm_plane *p;
4842 
4843         /* Track fb's for any planes being disabled */
4844         drm_for_each_plane_mask(p, dev, atomic->disabled_planes) {
4845                 struct intel_plane *plane = to_intel_plane(p);
4846 
4847                 mutex_lock(&dev->struct_mutex);
4848                 i915_gem_track_fb(intel_fb_obj(plane->base.fb), NULL,
4849                                   plane->frontbuffer_bit);
4850                 mutex_unlock(&dev->struct_mutex);
4851         }
4852 
4853         if (atomic->wait_for_flips)
4854                 intel_crtc_wait_for_pending_flips(&crtc->base);
4855 
4856         if (atomic->disable_fbc)
4857                 intel_fbc_disable_crtc(crtc);
4858 
4859         if (crtc->atomic.disable_ips)
4860                 hsw_disable_ips(crtc);
4861 
4862         if (atomic->pre_disable_primary)
4863                 intel_pre_disable_primary(&crtc->base);
4864 
4865         if (atomic->disable_cxsr) {
4866                 crtc->wm.cxsr_allowed = false;
4867                 intel_set_memory_cxsr(dev_priv, false);
4868         }
4869 }
4870 
4871 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4872 {
4873         struct drm_device *dev = crtc->dev;
4874         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4875         struct drm_plane *p;
4876         int pipe = intel_crtc->pipe;
4877 
4878         intel_crtc_dpms_overlay_disable(intel_crtc);
4879 
4880         drm_for_each_plane_mask(p, dev, plane_mask)
4881                 to_intel_plane(p)->disable_plane(p, crtc);
4882 
4883         /*
4884          * FIXME: Once we grow proper nuclear flip support out of this we need
4885          * to compute the mask of flip planes precisely. For the time being
4886          * consider this a flip to a NULL plane.
4887          */
4888         intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4889 }
4890 
/*
 * ironlake_crtc_enable - modeset enable sequence for ILK-class pipes
 *
 * Brings up the shared DPLL, pipe timings, FDI m/n values, pipeconf,
 * FDI PLL, panel fitter, LUT and finally the pipe, PCH and encoders.
 * The step ordering follows the existing inline comments; do not
 * reorder without checking the hardware requirements.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	/* CPT needs its transcoder/DPLL routing double-checked after enable. */
	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);
}
4959 
4960 /* IPS only exists on ULT machines and is tied to pipe A. */
4961 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4962 {
4963         return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4964 }
4965 
/*
 * haswell_crtc_enable - modeset enable sequence for HSW/BDW/SKL-class pipes
 *
 * Brings up the shared DPLL, pipe timings, pipe multiplier, pipeconf
 * and CSC, trains the FDI link for PCH encoders, enables the DDI
 * pipe/transcoder clocks, the panel fitter, LUT and finally the pipe,
 * PCH and encoders. DSI outputs skip the DDI clock/transcoder steps.
 * The step ordering follows the existing inline comments.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* PIPE_MULT only exists for non-eDP transcoders. */
	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
		dev_priv->display.fdi_link_train(crtc);
	}

	if (!is_dsi)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!is_dsi)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
5061 
5062 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5063 {
5064         struct drm_device *dev = crtc->base.dev;
5065         struct drm_i915_private *dev_priv = dev->dev_private;
5066         int pipe = crtc->pipe;
5067 
5068         /* To avoid upsetting the power well on haswell only disable the pfit if
5069          * it's in use. The hw state code will make sure we get this right. */
5070         if (force || crtc->config->pch_pfit.enabled) {
5071                 I915_WRITE(PF_CTL(pipe), 0);
5072                 I915_WRITE(PF_WIN_POS(pipe), 0);
5073                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5074         }
5075 }
5076 
/*
 * ironlake_crtc_disable - modeset disable sequence for ILK-class pipes
 *
 * Mirrors ironlake_crtc_enable in reverse: encoders, vblanks, pipe,
 * panel fitter, FDI, encoder post-disable hooks, then (for PCH
 * encoders) the PCH transcoder, its CPT DP/DPLL routing and finally
 * the FDI PLL.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* Expect spurious PCH underruns while the transcoder goes down. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}
}
5127 
/*
 * haswell_crtc_disable - modeset disable sequence for HSW/BDW/SKL-class pipes
 *
 * Mirrors haswell_crtc_enable in reverse: encoders (with opregion
 * notification), vblanks, pipe, MST payload, DDI transcoder, scaler or
 * panel fitter, DDI pipe clock, then the LPT PCH transcoder and FDI
 * for PCH encoders, and finally the encoder post-disable hooks. DSI
 * outputs skip the DDI transcoder/clock steps.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* Expect spurious PCH underruns while the transcoder goes down. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);
	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!is_dsi)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	/* gen9+ uses the unified scaler block instead of the ILK pfit. */
	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!is_dsi)
		intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);
}
5173 
5174 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5175 {
5176         struct drm_device *dev = crtc->base.dev;
5177         struct drm_i915_private *dev_priv = dev->dev_private;
5178         struct intel_crtc_state *pipe_config = crtc->config;
5179 
5180         if (!pipe_config->gmch_pfit.control)
5181                 return;
5182 
5183         /*
5184          * The panel fitter should only be adjusted whilst the pipe is disabled,
5185          * according to register description and PRM.
5186          */
5187         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5188         assert_pipe_disabled(dev_priv, crtc->pipe);
5189 
5190         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5191         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5192 
5193         /* Border color in case we don't scale up to the full screen. Black by
5194          * default, change to something else for debugging. */
5195         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5196 }
5197 
/*
 * Map a DDI port to the display power domain that feeds it. Ports A-D
 * use the 4-lane domains, port E the 2-lane one; an unknown port is
 * flagged via MISSING_CASE and falls back to the catch-all domain.
 */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5216 
/*
 * Map a DDI port to the AUX channel power domain used for DP AUX /
 * DDC transactions on that port. Note that port E currently shares
 * AUX D (see the FIXME); an unknown port is flagged via MISSING_CASE.
 */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}
5236 
/*
 * Iterate @domain over every power domain whose bit is set in @mask.
 * @domain must be an enum intel_display_power_domain lvalue.
 */
#define for_each_power_domain(domain, mask)                             \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
		if ((1 << (domain)) & (mask))
5240 
/*
 * intel_display_port_power_domain - power domain for an encoder's port
 *
 * Resolves the digital port behind @intel_encoder (including MST
 * primary ports) and returns its lane power domain; analog and DSI
 * outputs have dedicated domains, anything else falls back to the
 * catch-all POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough - handled like the DP/HDMI/eDP cases below */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5267 
/*
 * intel_display_port_aux_power_domain - AUX power domain for an encoder
 *
 * Resolves the digital port behind @intel_encoder (including MST
 * primary ports) and returns the AUX channel power domain needed to
 * talk to the sink; unexpected encoder types are flagged via
 * MISSING_CASE and fall back to AUX A.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough - handled like the DP/eDP cases below */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5297 
5298 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5299 {
5300         struct drm_device *dev = crtc->dev;
5301         struct intel_encoder *intel_encoder;
5302         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5303         enum pipe pipe = intel_crtc->pipe;
5304         unsigned long mask;
5305         enum transcoder transcoder;
5306 
5307         if (!crtc->state->active)
5308                 return 0;
5309 
5310         transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
5311 
5312         mask = BIT(POWER_DOMAIN_PIPE(pipe));
5313         mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5314         if (intel_crtc->config->pch_pfit.enabled ||
5315             intel_crtc->config->pch_pfit.force_thru)
5316                 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5317 
5318         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5319                 mask |= BIT(intel_display_port_power_domain(intel_encoder));
5320 
5321         return mask;
5322 }
5323 
5324 static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
5325 {
5326         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5327         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5328         enum intel_display_power_domain domain;
5329         unsigned long domains, new_domains, old_domains;
5330 
5331         old_domains = intel_crtc->enabled_power_domains;
5332         intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
5333 
5334         domains = new_domains & ~old_domains;
5335 
5336         for_each_power_domain(domain, domains)
5337                 intel_display_power_get(dev_priv, domain);
5338 
5339         return old_domains & ~new_domains;
5340 }
5341 
5342 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5343                                       unsigned long domains)
5344 {
5345         enum intel_display_power_domain domain;
5346 
5347         for_each_power_domain(domain, domains)
5348                 intel_display_power_put(dev_priv, domain);
5349 }
5350 
/*
 * Update power domain references for every CRTC being modeset and
 * commit a pending cdclk change. New domain references are taken
 * before the old ones are dropped, so domains shared between the old
 * and new state stay powered across the update.
 */
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (needs_modeset(crtc->state))
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc);
	}

	/* cdclk changes are only legal when the state allows a full modeset. */
	if (dev_priv->display.modeset_commit_cdclk) {
		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;

		if (cdclk != dev_priv->cdclk_freq &&
		    !WARN_ON(!state->allow_modeset))
			dev_priv->display.modeset_commit_cdclk(state);
	}

	for (i = 0; i < I915_MAX_PIPES; i++)
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
}
5378 
5379 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5380 {
5381         int max_cdclk_freq = dev_priv->max_cdclk_freq;
5382 
5383         if (INTEL_INFO(dev_priv)->gen >= 9 ||
5384             IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5385                 return max_cdclk_freq;
5386         else if (IS_CHERRYVIEW(dev_priv))
5387                 return max_cdclk_freq*95/100;
5388         else if (INTEL_INFO(dev_priv)->gen < 4)
5389                 return 2*max_cdclk_freq*90/100;
5390         else
5391                 return max_cdclk_freq*90/100;
5392 }
5393 
/*
 * Determine the platform's maximum CD clock (and from it the maximum
 * dotclock) and cache both in dev_priv. SKL reads the fused limit from
 * SKL_DFSM, BDW combines a fuse strap with the SKU (ULX/ULT), VLV/CHV
 * use fixed values, and everything else is assumed to run at the
 * current cdclk.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5441 
/*
 * intel_update_cdclk - re-read the current cdclk from the hardware
 *
 * Caches the frequency in dev_priv->cdclk_freq, reprograms the gmbus
 * clock divider on VLV, and on first invocation also looks up the
 * platform's maximum cdclk.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->cdclk_freq);

	if (IS_VALLEYVIEW(dev)) {
		/*
		 * Program the gmbus_freq based on the cdclk frequency.
		 * BSpec erroneously claims we should aim for 4MHz, but
		 * in fact 1MHz is the correct frequency.
		 */
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
	}

	/* Zero means max_cdclk hasn't been determined yet; do it once. */
	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);
}
5467 
5468 static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5469 {
5470         struct drm_i915_private *dev_priv = dev->dev_private;
5471         uint32_t divider;
5472         uint32_t ratio;
5473         uint32_t current_freq;
5474         int ret;
5475 
5476         /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5477         switch (frequency) {
5478         case 144000:
5479                 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5480                 ratio = BXT_DE_PLL_RATIO(60);
5481                 break;
5482         case 288000:
5483                 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5484                 ratio = BXT_DE_PLL_RATIO(60);
5485                 break;
5486         case 384000:
5487                 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5488                 ratio = BXT_DE_PLL_RATIO(60);
5489                 break;
5490         case 576000:
5491                 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5492                 ratio = BXT_DE_PLL_RATIO(60);
5493                 break;
5494         case 624000:
5495                 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5496                 ratio = BXT_DE_PLL_RATIO(65);
5497                 break;
5498         case 19200:
5499                 /*
5500                  * Bypass frequency with DE PLL disabled. Init ratio, divider
5501                  * to suppress GCC warning.
5502                  */
5503                 ratio = 0;
5504                 divider = 0;
5505                 break;
5506         default:
5507                 DRM_ERROR("unsupported CDCLK freq %d", frequency);
5508 
5509                 return;
5510         }
5511 
5512         mutex_lock(&dev_priv->rps.hw_lock);
5513         /* Inform power controller of upcoming frequency change */
5514         ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5515                                       0x80000000);
5516         mutex_unlock(&dev_priv->rps.hw_lock);
5517 
5518         if (ret) {
5519                 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5520                           ret, frequency);
5521                 return;
5522         }
5523 
5524         current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5525         /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5526         current_freq = current_freq * 500 + 1000;
5527 
5528         /*
5529          * DE PLL has to be disabled when
5530          * - setting to 19.2MHz (bypass, PLL isn't used)
5531          * - before setting to 624MHz (PLL needs toggling)
5532          * - before setting to any frequency from 624MHz (PLL needs toggling)
5533          */
5534         if (frequency == 19200 || frequency == 624000 ||
5535             current_freq == 624000) {
5536                 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5537                 /* Timeout 200us */
5538                 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5539                              1))
5540                         DRM_ERROR("timout waiting for DE PLL unlock\n");
5541         }
5542 
5543         if (frequency != 19200) {
5544                 uint32_t val;
5545 
5546                 val = I915_READ(BXT_DE_PLL_CTL);
5547                 val &= ~BXT_DE_PLL_RATIO_MASK;
5548                 val |= ratio;
5549                 I915_WRITE(BXT_DE_PLL_CTL, val);
5550 
5551                 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5552                 /* Timeout 200us */
5553                 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5554                         DRM_ERROR("timeout waiting for DE PLL lock\n");
5555 
5556                 val = I915_READ(CDCLK_CTL);
5557                 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5558                 val |= divider;
5559                 /*
5560                  * Disable SSA Precharge when CD clock frequency < 500 MHz,
5561                  * enable otherwise.
5562                  */
5563                 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5564                 if (frequency >= 500000)
5565                         val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5566 
5567                 val &= ~CDCLK_FREQ_DECIMAL_MASK;
5568                 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5569                 val |= (frequency - 1000) / 500;
5570                 I915_WRITE(CDCLK_CTL, val);
5571         }
5572 
5573         mutex_lock(&dev_priv->rps.hw_lock);
5574         ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5575                                       DIV_ROUND_UP(frequency, 25000));
5576         mutex_unlock(&dev_priv->rps.hw_lock);
5577 
5578         if (ret) {
5579                 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5580                           ret, frequency);
5581                 return;
5582         }
5583 
5584         intel_update_cdclk(dev);
5585 }
5586 
/*
 * broxton_init_cdclk - bring up cdclk on BXT during display init
 *
 * Disables the PCH reset handshake (there is no PCH on BXT), takes the
 * power reference needed for cdclk and, unless BIOS already enabled the
 * DE PLL, programs the maximum cdclk and powers up the DBUF.
 */
void broxton_init_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);

	/* Enable PG1 for cdclk */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* check if cd clock is enabled */
	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
		/* Keep the power reference taken above in this case. */
		DRM_DEBUG_KMS("Display already initialized\n");
		return;
	}

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 * - check if setting the max (or any) cdclk freq is really necessary
	 *   here, it belongs to modeset time
	 */
	broxton_set_cdclk(dev, 624000);

	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	/* Give the DBUF time to power up before checking the state bit. */
	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout!\n");
}
5628 
/*
 * broxton_uninit_cdclk - tear down cdclk on BXT
 *
 * Powers down the DBUF, drops cdclk to the 19.2MHz bypass frequency
 * (turning off the DE PLL) and releases the power reference taken in
 * broxton_init_cdclk().
 */
void broxton_uninit_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	/* Give the DBUF time to power down before checking the state bit. */
	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout!\n");

	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
	broxton_set_cdclk(dev, 19200);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5646 
/*
 * Valid SKL cdclk frequencies (in kHz) and the DPLL0 VCO each one
 * requires. Frequencies not in this table fall back to the 8100 VCO
 * (see skl_cdclk_get_vco()).
 */
static const struct skl_cdclk_entry {
	unsigned int freq;	/* cdclk frequency in kHz */
	unsigned int vco;	/* required DPLL0 VCO */
} skl_cdclk_frequencies[] = {
	{ .freq = 308570, .vco = 8640 },
	{ .freq = 337500, .vco = 8100 },
	{ .freq = 432000, .vco = 8640 },
	{ .freq = 450000, .vco = 8100 },
	{ .freq = 540000, .vco = 8100 },
	{ .freq = 617140, .vco = 8640 },
	{ .freq = 675000, .vco = 8100 },
};
5659 
/*
 * Convert a cdclk frequency in kHz into the CDCLK_CTL decimal field
 * encoding: .1 MHz fixed point with a -1 MHz offset.
 */
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
	unsigned int offset_khz = freq - 1000;

	return offset_khz / 500;
}
5664 
5665 static unsigned int skl_cdclk_get_vco(unsigned int freq)
5666 {
5667         unsigned int i;
5668 
5669         for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5670                 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5671 
5672                 if (e->freq == freq)
5673                         return e->vco;
5674         }
5675 
5676         return 8100;
5677 }
5678 
5679 static void
5680 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5681 {
5682         unsigned int min_freq;
5683         u32 val;
5684 
5685         /* select the minimum CDCLK before enabling DPLL 0 */
5686         val = I915_READ(CDCLK_CTL);
5687         val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5688         val |= CDCLK_FREQ_337_308;
5689 
5690         if (required_vco == 8640)
5691                 min_freq = 308570;
5692         else
5693                 min_freq = 337500;
5694 
5695         val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5696 
5697         I915_WRITE(CDCLK_CTL, val);
5698         POSTING_READ(CDCLK_CTL);
5699 
5700         /*
5701          * We always enable DPLL0 with the lowest link rate possible, but still
5702          * taking into account the VCO required to operate the eDP panel at the
5703          * desired frequency. The usual DP link rates operate with a VCO of
5704          * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5705          * The modeset code is responsible for the selection of the exact link
5706          * rate later on, with the constraint of choosing a frequency that
5707          * works with required_vco.
5708          */
5709         val = I915_READ(DPLL_CTRL1);
5710 
5711         val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5712                  DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5713         val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5714         if (required_vco == 8640)
5715                 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5716                                             SKL_DPLL0);
5717         else
5718                 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5719                                             SKL_DPLL0);
5720 
5721         I915_WRITE(DPLL_CTRL1, val);
5722         POSTING_READ(DPLL_CTRL1);
5723 
5724         I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5725 
5726         if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5727                 DRM_ERROR("DPLL0 not locked\n");
5728 }
5729 
5730 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5731 {
5732         int ret;
5733         u32 val;
5734 
5735         /* inform PCU we want to change CDCLK */
5736         val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5737         mutex_lock(&dev_priv->rps.hw_lock);
5738         ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5739         mutex_unlock(&dev_priv->rps.hw_lock);
5740 
5741         return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5742 }
5743 
5744 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5745 {
5746         unsigned int i;
5747 
5748         for (i = 0; i < 15; i++) {
5749                 if (skl_cdclk_pcu_ready(dev_priv))
5750                         return true;
5751                 udelay(10);
5752         }
5753 
5754         return false;
5755 }
5756 
5757 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5758 {
5759         struct drm_device *dev = dev_priv->dev;
5760         u32 freq_select, pcu_ack;
5761 
5762         DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
5763 
5764         if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5765                 DRM_ERROR("failed to inform PCU about cdclk change\n");
5766                 return;
5767         }
5768 
5769         /* set CDCLK_CTL */
5770         switch(freq) {
5771         case 450000:
5772         case 432000:
5773                 freq_select = CDCLK_FREQ_450_432;
5774                 pcu_ack = 1;
5775                 break;
5776         case 540000:
5777                 freq_select = CDCLK_FREQ_540;
5778                 pcu_ack = 2;
5779                 break;
5780         case 308570:
5781         case 337500:
5782         default:
5783                 freq_select = CDCLK_FREQ_337_308;
5784                 pcu_ack = 0;
5785                 break;
5786         case 617140:
5787         case 675000:
5788                 freq_select = CDCLK_FREQ_675_617;
5789                 pcu_ack = 3;
5790                 break;
5791         }
5792 
5793         I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5794         POSTING_READ(CDCLK_CTL);
5795 
5796         /* inform PCU of the change */
5797         mutex_lock(&dev_priv->rps.hw_lock);
5798         sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5799         mutex_unlock(&dev_priv->rps.hw_lock);
5800 
5801         intel_update_cdclk(dev);
5802 }
5803 
/*
 * skl_uninit_cdclk - tear down cdclk on SKL
 *
 * Powers down the DBUF, disables DPLL0 when the DMC firmware payload is
 * loaded, and releases the power reference taken in skl_init_cdclk().
 */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	/* Give the DBUF time to power down before checking the state bit. */
	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	/*
	 * DMC assumes ownership of LCPLL and will get confused if we touch it.
	 */
	if (dev_priv->csr.dmc_payload) {
		/* disable DPLL0 */
		I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
					~LCPLL_PLL_ENABLE);
		if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
			DRM_ERROR("Couldn't disable DPLL0\n");
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
5828 
/*
 * skl_init_cdclk - bring up cdclk on SKL during display init
 *
 * Enables the PCH reset handshake, takes the power reference needed for
 * the PLLs, enables DPLL0 if BIOS left it off, programs the BIOS-chosen
 * cdclk frequency and powers up the DBUF.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val;
	unsigned int required_vco;

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 with the VCO the boot cdclk requires */
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, required_vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	/* Give the DBUF time to power up before checking the state bit. */
	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}
5860 
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match the hardware before changing it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	/* Pick the Punit DSPFREQ level for the requested frequency. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	/* Wait until the Punit reflects the new level in the status field. */
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	/* Only 400MHz requires a CCK divider change on VLV. */
	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	/* Refresh the cached cdclk value now that the hardware changed. */
	intel_update_cdclk(dev);
}
5926 
/*
 * cherryview_set_cdclk - program the CHV cdclk to @cdclk (in kHz) via
 * the Punit DSPFREQ register.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match the hardware before changing it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	/* Only the four frequencies below are valid on CHV. */
	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	/* Wait until the Punit reflects the new divider in the status field. */
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* Refresh the cached cdclk value now that the hardware changed. */
	intel_update_cdclk(dev);
}
5967 
5968 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5969                                  int max_pixclk)
5970 {
5971         int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5972         int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5973 
5974         /*
5975          * Really only a few cases to deal with, as only 4 CDclks are supported:
5976          *   200MHz
5977          *   267MHz
5978          *   320/333MHz (depends on HPLL freq)
5979          *   400MHz (VLV only)
5980          * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5981          * of the lower bin and adjust if needed.
5982          *
5983          * We seem to get an unstable or solid color picture at 200MHz.
5984          * Not sure what's wrong. For now use 200MHz only when all pipes
5985          * are off.
5986          */
5987         if (!IS_CHERRYVIEW(dev_priv) &&
5988             max_pixclk > freq_320*limit/100)
5989                 return 400000;
5990         else if (max_pixclk > 266667*limit/100)
5991                 return freq_320;
5992         else if (max_pixclk > 0)
5993                 return 266667;
5994         else
5995                 return 200000;
5996 }
5997 
/*
 * Pick the lowest BXT cdclk (in kHz) that can drive @max_pixclk, with a
 * 10% guardband on top of each bin.
 */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	static const struct {
		int guard;	/* lower bin the 90% guardband applies to */
		int cdclk;	/* frequency picked when pixclk exceeds it */
	} steps[] = {
		{ 576000, 624000 },
		{ 384000, 576000 },
		{ 288000, 384000 },
		{ 144000, 288000 },
	};
	unsigned int i;

	/*
	 * FIXME:
	 * - remove the guardband, it's not needed on BXT
	 * - set 19.2MHz bypass frequency if there are no active pipes
	 */
	for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		if (max_pixclk > steps[i].guard * 9 / 10)
			return steps[i].cdclk;
	}

	return 144000;
}
6017 
/* Compute the max pixel clock for new configuration. Uses atomic state if
 * that's non-NULL, look at current state otherwise. */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		/* May fail (error pointer) — propagate the error code. */
		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Disabled crtcs don't constrain the pixel clock. */
		if (!crtc_state->base.enable)
			continue;

		max_pixclk = max(max_pixclk,
				 crtc_state->base.adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}
6041 
6042 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6043 {
6044         struct drm_device *dev = state->dev;
6045         struct drm_i915_private *dev_priv = dev->dev_private;
6046         int max_pixclk = intel_mode_max_pixclk(dev, state);
6047 
6048         if (max_pixclk < 0)
6049                 return max_pixclk;
6050 
6051         to_intel_atomic_state(state)->cdclk =
6052                 valleyview_calc_cdclk(dev_priv, max_pixclk);
6053 
6054         return 0;
6055 }
6056 
6057 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6058 {
6059         struct drm_device *dev = state->dev;
6060         struct drm_i915_private *dev_priv = dev->dev_private;
6061         int max_pixclk = intel_mode_max_pixclk(dev, state);
6062 
6063         if (max_pixclk < 0)
6064                 return max_pixclk;
6065 
6066         to_intel_atomic_state(state)->cdclk =
6067                 broxton_calc_cdclk(dev_priv, max_pixclk);
6068 
6069         return 0;
6070 }
6071 
/*
 * vlv_program_pfi_credits - reprogram the PFI credits after a cdclk
 * change. The credit count depends on whether cdclk runs at or above
 * czclk.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6107 
/*
 * Commit hook: apply the cdclk chosen during the atomic check phase to
 * the hardware, then reprogram the PFI credits to match.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6134 
6135 static void valleyview_crtc_enable(struct drm_crtc *crtc)
6136 {
6137         struct drm_device *dev = crtc->dev;
6138         struct drm_i915_private *dev_priv = to_i915(dev);
6139         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6140         struct intel_encoder *encoder;
6141         int pipe = intel_crtc->pipe;
6142         bool is_dsi;
6143 
6144         if (WARN_ON(intel_crtc->active))
6145                 return;
6146 
6147         is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
6148 
6149         if (intel_crtc->config->has_dp_encoder)
6150                 intel_dp_set_m_n(intel_crtc, M1_N1);
6151 
6152         intel_set_pipe_timings(intel_crtc);
6153 
6154         if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6155                 struct drm_i915_private *dev_priv = dev->dev_private;
6156 
6157                 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6158                 I915_WRITE(CHV_CANVAS(pipe), 0);
6159         }
6160 
6161         i9xx_set_pipeconf(intel_crtc);
6162 
6163         intel_crtc->active = true;
6164 
6165         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6166 
6167         for_each_encoder_on_crtc(dev, crtc, encoder)
6168                 if (encoder->pre_pll_enable)
6169                         encoder->pre_pll_enable(encoder);
6170 
6171         if (!is_dsi) {
6172                 if (IS_CHERRYVIEW(dev)) {
6173                         chv_prepare_pll(intel_crtc, intel_crtc->config);
6174                         chv_enable_pll(intel_crtc, intel_crtc->config);
6175                 } else {
6176                         vlv_prepare_pll(intel_crtc, intel_crtc->config);
6177                         vlv_enable_pll(intel_crtc, intel_crtc->config);
6178                 }
6179         }
6180 
6181         for_each_encoder_on_crtc(dev, crtc, encoder)
6182                 if (encoder->pre_enable)
6183                         encoder->pre_enable(encoder);
6184 
6185         i9xx_pfit_enable(intel_crtc);
6186 
6187         intel_crtc_load_lut(crtc);
6188 
6189         intel_enable_pipe(intel_crtc);
6190 
6191         assert_vblank_disabled(crtc);
6192         drm_crtc_vblank_on(crtc);
6193 
6194         for_each_encoder_on_crtc(dev, crtc, encoder)
6195                 encoder->enable(encoder);
6196 }
6197 
6198 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6199 {
6200         struct drm_device *dev = crtc->base.dev;
6201         struct drm_i915_private *dev_priv = dev->dev_private;
6202 
6203         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6204         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6205 }
6206 
/*
 * i9xx_crtc_enable - enable a pipe on pre-VLV gmch platforms
 *
 * Enable sequence: PLL dividers, M/N values, pipe timings, pipeconf,
 * underrun reporting, encoder pre_enable hooks, PLL, panel fitter,
 * LUT, watermarks, the pipe itself, vblank, and finally the encoder
 * enable hooks.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/* The crtc must be fully disabled before being enabled again. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* gen2 is skipped here, mirroring i9xx_crtc_disable */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6251 
6252 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6253 {
6254         struct drm_device *dev = crtc->base.dev;
6255         struct drm_i915_private *dev_priv = dev->dev_private;
6256 
6257         if (!crtc->config->gmch_pfit.control)
6258                 return;
6259 
6260         assert_pipe_disabled(dev_priv, crtc->pipe);
6261 
6262         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6263                          I915_READ(PFIT_CONTROL));
6264         I915_WRITE(PFIT_CONTROL, 0);
6265 }
6266 
6267 static void i9xx_crtc_disable(struct drm_crtc *crtc)
6268 {
6269         struct drm_device *dev = crtc->dev;
6270         struct drm_i915_private *dev_priv = dev->dev_private;
6271         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6272         struct intel_encoder *encoder;
6273         int pipe = intel_crtc->pipe;
6274 
6275         /*
6276          * On gen2 planes are double buffered but the pipe isn't, so we must
6277          * wait for planes to fully turn off before disabling the pipe.
6278          * We also need to wait on all gmch platforms because of the
6279          * self-refresh mode constraint explained above.
6280          */
6281         intel_wait_for_vblank(dev, pipe);
6282 
6283         for_each_encoder_on_crtc(dev, crtc, encoder)
6284                 encoder->disable(encoder);
6285 
6286         drm_crtc_vblank_off(crtc);
6287         assert_vblank_disabled(crtc);
6288 
6289         intel_disable_pipe(intel_crtc);
6290 
6291         i9xx_pfit_disable(intel_crtc);
6292 
6293         for_each_encoder_on_crtc(dev, crtc, encoder)
6294                 if (encoder->post_disable)
6295                         encoder->post_disable(encoder);
6296 
6297         if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
6298                 if (IS_CHERRYVIEW(dev))
6299                         chv_disable_pll(dev_priv, pipe);
6300                 else if (IS_VALLEYVIEW(dev))
6301                         vlv_disable_pll(dev_priv, pipe);
6302                 else
6303                         i9xx_disable_pll(intel_crtc);
6304         }
6305 
6306         for_each_encoder_on_crtc(dev, crtc, encoder)
6307                 if (encoder->post_pll_disable)
6308                         encoder->post_pll_disable(encoder);
6309 
6310         if (!IS_GEN2(dev))
6311                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6312 }
6313 
/*
 * Disable a crtc directly, bypassing the atomic commit machinery: hide
 * the primary plane, call the platform crtc_disable hook, update the
 * driver's bookkeeping and drop the power domain references the crtc
 * held.  The crtc's atomic state itself is not adjusted.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	/* Take down the primary plane first if it is currently visible. */
	if (to_intel_plane_state(crtc->primary->state)->visible) {
		intel_crtc_wait_for_pending_flips(crtc);
		intel_pre_disable_primary(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Release every power domain reference this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;
}
6342 
6343 /*
6344  * turn all crtc's off, but do not adjust state
6345  * This has to be paired with a call to intel_modeset_setup_hw_state.
6346  */
6347 int intel_display_suspend(struct drm_device *dev)
6348 {
6349         struct drm_mode_config *config = &dev->mode_config;
6350         struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
6351         struct drm_atomic_state *state;
6352         struct drm_crtc *crtc;
6353         unsigned crtc_mask = 0;
6354         int ret = 0;
6355 
6356         if (WARN_ON(!ctx))
6357                 return 0;
6358 
6359         lockdep_assert_held(&ctx->ww_ctx);
6360         state = drm_atomic_state_alloc(dev);
6361         if (WARN_ON(!state))
6362                 return -ENOMEM;
6363 
6364         state->acquire_ctx = ctx;
6365         state->allow_modeset = true;
6366 
6367         for_each_crtc(dev, crtc) {
6368                 struct drm_crtc_state *crtc_state =
6369                         drm_atomic_get_crtc_state(state, crtc);
6370 
6371                 ret = PTR_ERR_OR_ZERO(crtc_state);
6372                 if (ret)
6373                         goto free;
6374 
6375                 if (!crtc_state->active)
6376                         continue;
6377 
6378                 crtc_state->active = false;
6379                 crtc_mask |= 1 << drm_crtc_index(crtc);
6380         }
6381 
6382         if (crtc_mask) {
6383                 ret = drm_atomic_commit(state);
6384 
6385                 if (!ret) {
6386                         for_each_crtc(dev, crtc)
6387                                 if (crtc_mask & (1 << drm_crtc_index(crtc)))
6388                                         crtc->state->active = true;
6389 
6390                         return ret;
6391                 }
6392         }
6393 
6394 free:
6395         if (ret)
6396                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6397         drm_atomic_state_free(state);
6398         return ret;
6399 }
6400 
/*
 * Tear down the drm core state of an encoder and free the containing
 * intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6408 
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency): a connector the hardware reports as enabled must have
 * an active crtc and a matching encoder in its atomic state, and vice versa.
 * Inconsistencies are reported via I915_STATE_WARN, not fixed up. */
static void intel_connector_check_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		/* Hardware says the connector is on. */
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* DP MST encoders are checked elsewhere; skip them here. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hardware says the connector is off. */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6447 
6448 int intel_connector_init(struct intel_connector *connector)
6449 {
6450         struct drm_connector_state *connector_state;
6451 
6452         connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
6453         if (!connector_state)
6454                 return -ENOMEM;
6455 
6456         connector->base.state = connector_state;
6457         return 0;
6458 }
6459 
6460 struct intel_connector *intel_connector_alloc(void)
6461 {
6462         struct intel_connector *connector;
6463 
6464         connector = kzalloc(sizeof *connector, GFP_KERNEL);
6465         if (!connector)
6466                 return NULL;
6467 
6468         if (intel_connector_init(connector) < 0) {
6469                 kfree(connector);
6470                 return NULL;
6471         }
6472 
6473         return connector;
6474 }
6475 
6476 /* Simple connector->get_hw_state implementation for encoders that support only
6477  * one connector and no cloning and hence the encoder state determines the state
6478  * of the connector. */
6479 bool intel_connector_get_hw_state(struct intel_connector *connector)
6480 {
6481         enum pipe pipe = 0;
6482         struct