/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	bool enabled;

	/* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
	int lm;
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* CTL used for this CRTC: */
	struct mdp5_ctl *ctl;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
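/*
 * request_pending() records which completions (flip and/or cursor) are
 * outstanding and arms the vblank irq so they can be signalled from the
 * interrupt handler:
 */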
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
#define mdp5_lm_get_flush(lm)	mdp_ctl_flush_mask_lm(lm)
static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
	mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static void crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* we could have already released CTL in the disable path: */
	if (!mdp5_crtc->ctl)
		return;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}
	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

	crtc_flush(crtc, flush_mask);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	struct drm_plane *plane;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			DBG("%s: send event: %p", mdp5_crtc->name, event);
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		mdp5_plane_complete_flip(plane);
	}
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(mdp5_crtc);
}
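/*
 * DPMS on takes an mdp clock reference and registers the error irq;
 * DPMS off reverses that, after first staging all layers as unused:
 */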
static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	bool enabled = (mode == DRM_MODE_DPMS_ON);

	DBG("%s: mode=%d", mdp5_crtc->name, mode);

	if (enabled != mdp5_crtc->enabled) {
		if (enabled) {
			mdp5_enable(mdp5_kms);
			mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
		} else {
			/* set STAGE_UNUSED for all layers */
			mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
			mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
			mdp5_disable(mdp5_kms);
		}
		mdp5_crtc->enabled = enabled;
	}
}
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * When border is enabled, the border color will ALWAYS be the base layer.
 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
 * If disabled, the first plane starts at STAGE_BASE.
 *
 * Note:
 * Border is not enabled here because the private plane is exactly
 * the CRTC resolution.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
	unsigned long flags;
#define blender(stage)	((stage) - STAGE_BASE)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	if (!mdp5_crtc->ctl)
		goto out;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp_mixer_stage_id stage =
			to_mdp5_plane_state(plane->state)->stage;

		/*
		 * Note: This cannot happen with current implementation but
		 * we need to check this condition once z property is added
		 */
		BUG_ON(stage > hw_cfg->lm.nb_stages);

		/* LM */
		mdp5_write(mdp5_kms,
				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(stage)), 0xff);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(stage)), 0x00);

		/* CTL */
		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(plane)), stage);
	}

	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
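/*
 * The mixer output size is programmed from the CRTC's adjusted mode,
 * under lm_lock since the REG_MDP5_LM_* registers are shared with
 * blend_setup():
 */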
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s", mdp5_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	mdp5_enable(get_kms(crtc));
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s", mdp5_crtc->name);
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush_all(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp5_disable(get_kms(crtc));
}
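/*
 * Planes attached to the CRTC are sorted by zpos (see pstate_cmp())
 * so that mixer stages can be assigned bottom-up in atomic_check():
 */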
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE3 + 1];
	int cnt = 0, i;

	DBG("%s: check", mdp5_crtc->name);

	/* request a free CTL, if none is already allocated for this CRTC */
	if (state->enable && !mdp5_crtc->ctl) {
		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
		if (WARN_ON(!mdp5_crtc->ctl))
			return -EINVAL;
	}

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	drm_atomic_crtc_state_for_each_plane(plane, state) {
		struct drm_plane_state *pstate;

		if (cnt >= ARRAY_SIZE(pstates)) {
			dev_err(dev->dev, "too many planes!\n");
			return -EINVAL;
		}

		pstate = state->state->plane_states[drm_plane_index(plane)];

		/* plane might not have changed, in which case take
		 * current state:
		 */
		if (!pstate)
			pstate = plane->state;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;
	}

	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	for (i = 0; i < cnt; i++) {
		pstates[i].state->stage = STAGE_BASE + i;
		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
				pstates[i].state->stage);
	}

	return 0;
}
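/*
 * atomic_begin()/atomic_flush() bracket the plane updates of an atomic
 * commit: blend state is reprogrammed and latched to hardware by the CTL
 * flush, and any pending flip event completes on the next vblank irq:
 */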
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s: begin", mdp5_crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush_all(crtc);
	request_pending(crtc, PENDING_FLIP);

	if (mdp5_crtc->ctl && !crtc->state->enable) {
		mdp5_ctl_release(mdp5_crtc->ctl);
		mdp5_crtc->ctl = NULL;
	}
}
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	/* XXX: no CRTC properties are supported yet */
	return -EINVAL;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp5_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
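/* note: the drm_atomic_helper_* funcs above implement the legacy
 * set_config/page_flip entry points on top of the atomic_* hooks below:
 */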
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.dpms = mdp5_crtc_dpms,
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.mode_set = drm_helper_crtc_mode_set,
	.mode_set_base = drm_helper_crtc_mode_set_base,
	.prepare = mdp5_crtc_prepare,
	.commit = mdp5_crtc_commit,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};
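/*
 * The vblank irq handler is self-disarming: it unregisters itself and
 * then completes whatever work request_pending() recorded:
 */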
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}
/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
		enum mdp5_intf intf_id)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	uint32_t flush_mask = 0;
	uint32_t intf_sel;
	unsigned long flags;

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf);
	mdp5_crtc->vblank.irqmask = intf2vblank(intf);
	mdp_irq_update(&mdp5_kms->base);

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

	crtc_flush(crtc, flush_mask);
}
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	if (WARN_ON(!crtc))
		return -EINVAL;

	return mdp5_crtc->lm;
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
	mdp5_crtc->lm = GET_LM_ID(id);

	spin_lock_init(&mdp5_crtc->lm_lock);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	mdp5_plane_install_properties(plane, &crtc->base);

	return crtc;
}