Merge remote-tracking branch 'origin/display/dpu' into display/next

* origin/display/dpu: (73 commits)
  gpu: imx: framegen: Use crtc_clock instead of mode clock
  gpu: imx: dpu: common: Initialize SCU misc settings in dpu_resume()
  LF-73 gpu: imx: dpu: sc misc: Initialize KACHUNK_CNT as needed by blit engine
  gpu: imx: dpu: sc misc: Rename dpu_pxlink_init() to dpu_sc_misc_init()
  gpu: imx: dpu: sc misc: Rename dpu_sc_misc_init() to dpu_sc_misc_get_handle()
  ...
Dong Aisheng 2019-12-02 18:01:01 +08:00
commit a1cb6f9924
54 changed files with 11226 additions and 24 deletions

@ -110,6 +110,218 @@ prg@21cc000 {
fsl,pres = <&pre1>, <&pre2>, <&pre3>;
};
Freescale i.MX DPU
====================
Required properties:
- compatible: Should be "fsl,<chip>-dpu"
- reg: should be register base and length as documented in the
datasheet
- interrupt-parent: phandle pointing to the parent interrupt controller.
- interrupts, interrupt-names: Should contain interrupts and names as
documented in the datasheet.
- clocks, clock-names: phandles to the DPU clocks described in
Documentation/devicetree/bindings/clock/clock-bindings.txt
The following clocks are expected on i.MX8qxp:
"pll0" - PLL clock for display interface 0
"pll1" - PLL clock for display interface 1
"disp0" - pixel clock for display interface 0
"disp1" - pixel clock for display interface 1
The needed clock numbers for each are documented in
Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt.
- power-domains: phandles pointing to the power domains.
- power-domain-names: power domain names corresponding to the power-domains phandles.
- fsl,dpr-channels: phandles to the DPR channels attached to this DPU,
sorted by memory map addresses.
- fsl,pixel-combiner: phandle to the pixel combiner unit attached to this DPU.
Optional properties:
- port@[0-1]: Port nodes with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt.
ports 0 and 1 should correspond to display interface 0 and
display interface 1, respectively.
example:
dpu: dpu@56180000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx8qxp-dpu";
reg = <0x56180000 0x40000>;
interrupt-parent = <&irqsteer_dpu>;
interrupts = <448>, <449>, <450>, <64>,
<65>, <66>, <67>, <68>,
<69>, <70>, <193>, <194>,
<195>, <196>, <197>, <72>,
<73>, <74>, <75>, <76>,
<77>, <78>, <79>, <80>,
<81>, <199>, <200>, <201>,
<202>, <203>, <204>, <205>,
<206>, <207>, <208>, <0>,
<1>, <2>, <3>, <4>,
<82>, <83>, <84>, <85>,
<209>, <210>, <211>, <212>;
interrupt-names = "store9_shdload",
"store9_framecomplete",
"store9_seqcomplete",
"extdst0_shdload",
"extdst0_framecomplete",
"extdst0_seqcomplete",
"extdst4_shdload",
"extdst4_framecomplete",
"extdst4_seqcomplete",
"extdst1_shdload",
"extdst1_framecomplete",
"extdst1_seqcomplete",
"extdst5_shdload",
"extdst5_framecomplete",
"extdst5_seqcomplete",
"disengcfg_shdload0",
"disengcfg_framecomplete0",
"disengcfg_seqcomplete0",
"framegen0_int0",
"framegen0_int1",
"framegen0_int2",
"framegen0_int3",
"sig0_shdload",
"sig0_valid",
"sig0_error",
"disengcfg_shdload1",
"disengcfg_framecomplete1",
"disengcfg_seqcomplete1",
"framegen1_int0",
"framegen1_int1",
"framegen1_int2",
"framegen1_int3",
"sig1_shdload",
"sig1_valid",
"sig1_error",
"cmdseq_error",
"comctrl_sw0",
"comctrl_sw1",
"comctrl_sw2",
"comctrl_sw3",
"framegen0_primsync_on",
"framegen0_primsync_off",
"framegen0_secsync_on",
"framegen0_secsync_off",
"framegen1_primsync_on",
"framegen1_primsync_off",
"framegen1_secsync_on",
"framegen1_secsync_off";
clocks = <&dc_lpcg IMX_DC0_PLL0_CLK>,
<&dc_lpcg IMX_DC0_PLL1_CLK>,
<&dc_lpcg IMX_DC0_DISP0_CLK>,
<&dc_lpcg IMX_DC0_DISP1_CLK>;
clock-names = "pll0", "pll1", "disp0", "disp1";
power-domains = <&pd IMX_SC_R_DC_0>,
<&pd IMX_SC_R_DC_0_PLL_0>,
<&pd IMX_SC_R_DC_0_PLL_1>;
power-domain-names = "dc", "pll0", "pll1";
fsl,dpr-channels = <&dc0_dpr1_channel1>, <&dc0_dpr1_channel2>,
<&dc0_dpr1_channel3>, <&dc0_dpr2_channel1>,
<&dc0_dpr2_channel2>, <&dc0_dpr2_channel3>;
fsl,pixel-combiner = <&dc0_pc>;
dpu_disp0: port@0 {
reg = <0>;
dpu_disp0_lvds0_ch0: endpoint@0 {
remote-endpoint = <&ldb1_ch0>;
};
dpu_disp0_lvds0_ch1: endpoint@1 {
remote-endpoint = <&ldb1_ch1>;
};
dpu_disp0_mipi_dsi: endpoint@2 {
};
};
dpu_disp1: port@1 {
reg = <1>;
dpu_disp1_lvds1_ch0: endpoint@0 {
remote-endpoint = <&ldb2_ch0>;
};
dpu_disp1_lvds1_ch1: endpoint@1 {
remote-endpoint = <&ldb2_ch1>;
};
dpu_disp1_mipi_dsi: endpoint@2 {
};
};
};
Freescale i.MX8 PC (Pixel Combiner)
===================================
Required properties:
- compatible: should be "fsl,<chip>-pixel-combiner"
- reg: should be register base and length as documented in the
datasheet
- power-domains: phandle pointing to power domain
example:
pixel-combiner@56020000 {
compatible = "fsl,imx8qm-pixel-combiner";
reg = <0x56020000 0x10000>;
power-domains = <&pd IMX_SC_R_DC_0>;
};
Freescale i.MX8 PRG (Prefetch Resolve Gasket)
=============================================
Required properties:
- compatible: should be "fsl,<chip>-prg"
- reg: should be register base and length as documented in the
datasheet
- clocks: phandles to the PRG apb and rtram clocks, as described in
Documentation/devicetree/bindings/clock/clock-bindings.txt and
Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt.
- clock-names: should be "apb" and "rtram"
- power-domains: phandle pointing to power domain
example:
prg@56040000 {
compatible = "fsl,imx8qm-prg";
reg = <0x56040000 0x10000>;
clocks = <&dc0_prg0_lpcg 0>, <&dc0_prg0_lpcg 1>;
clock-names = "apb", "rtram";
power-domains = <&pd IMX_SC_R_DC_0>;
};
Freescale i.MX8 DPRC (Display Prefetch Resolve Channel)
=======================================================
Required properties:
- compatible: should be "fsl,<chip>-dpr-channel"
- reg: should be register base and length as documented in the
datasheet
- fsl,sc-resource: SCU resource number as defined in
include/dt-bindings/firmware/imx/rsrc.h
- fsl,prgs: phandles to the PRG unit(s) attached to this DPRC. The first one
is the primary PRG and the second one (if available) is the auxiliary PRG,
which is used to fetch the luma chunk of a two-plane YUV frame.
- clocks: phandles to the DPRC apb, b and rtram clocks, as described in
Documentation/devicetree/bindings/clock/clock-bindings.txt and
Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt.
- clock-names: should be "apb", "b" and "rtram"
- power-domains: phandle pointing to power domain
example:
dpr-channel@560e0000 {
compatible = "fsl,imx8qm-dpr-channel";
reg = <0x560e0000 0x10000>;
fsl,sc-resource = <IMX_SC_R_DC_0_BLIT1>;
fsl,prgs = <&dc0_prg2>, <&dc0_prg1>;
clocks = <&dc0_dpr0_lpcg 0>,
<&dc0_dpr0_lpcg 1>,
<&dc0_rtram0_lpcg 0>;
clock-names = "apb", "b", "rtram";
power-domains = <&pd IMX_SC_R_DC_0>;
};
Parallel display support
========================

@ -3,5 +3,5 @@
# taken to initialize them in the correct order. Link order is the only way
# to ensure this currently.
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-y += imx/
obj-y += drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/

@ -6,7 +6,7 @@ config DRM_IMX
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
depends on IMX_IPUV3_CORE || IMX_DPU_CORE
help
enable i.MX graphics support
@ -33,6 +33,13 @@ config DRM_IMX_LDB
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
config DRM_IMX_IPUV3
tristate
depends on DRM_IMX
depends on IMX_IPUV3_CORE
default y if DRM_IMX=y
default m if DRM_IMX=m
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
select DRM_DW_HDMI
@ -40,4 +47,5 @@ config DRM_IMX_HDMI
help
Choose this if you want to use HDMI on i.MX6.
source "drivers/gpu/drm/imx/dpu/Kconfig"
source "drivers/gpu/drm/imx/dcss/Kconfig"

@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
imxdrm-objs := imx-drm-core.o
obj-$(CONFIG_DRM_IMX) += imxdrm.o
@ -8,5 +8,8 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
obj-$(CONFIG_DRM_IMX_DPU) += dpu/
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/

@ -0,0 +1,6 @@
config DRM_IMX_DPU
tristate
depends on DRM_IMX
depends on IMX_DPU_CORE
default y if DRM_IMX=y
default m if DRM_IMX=m

@ -0,0 +1,4 @@
ccflags-y += -I $(srctree)/$(src)/../
imx-dpu-crtc-objs := dpu-crtc.o dpu-kms.o dpu-plane.o
obj-$(CONFIG_DRM_IMX_DPU) += imx-dpu-crtc.o

File diff suppressed because it is too large

@ -0,0 +1,94 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _DPU_CRTC_H_
#define _DPU_CRTC_H_
#include <drm/drm_vblank.h>
#include <video/dpu.h>
#include "dpu-plane.h"
#include "../imx-drm.h"
struct dpu_crtc {
struct device *dev;
struct drm_crtc base;
struct imx_drm_crtc *imx_crtc;
struct dpu_constframe *pa_cf;
struct dpu_constframe *sa_cf;
struct dpu_disengcfg *dec;
struct dpu_extdst *ed;
struct dpu_framegen *fg;
struct dpu_tcon *tcon;
struct dpu_store *st;
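/* auxiliary stream resources (the paired display stream, used with the pixel combiner) */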
struct dpu_constframe *aux_pa_cf;
struct dpu_constframe *aux_sa_cf;
struct dpu_disengcfg *aux_dec;
struct dpu_extdst *aux_ed;
struct dpu_framegen *aux_fg;
struct dpu_tcon *aux_tcon;
/* master */
struct dpu_constframe *m_pa_cf;
struct dpu_constframe *m_sa_cf;
struct dpu_disengcfg *m_dec;
struct dpu_extdst *m_ed;
struct dpu_framegen *m_fg;
struct dpu_tcon *m_tcon;
/* slave */
struct dpu_constframe *s_pa_cf;
struct dpu_constframe *s_sa_cf;
struct dpu_disengcfg *s_dec;
struct dpu_extdst *s_ed;
struct dpu_framegen *s_fg;
struct dpu_tcon *s_tcon;
struct dpu_plane **plane;
unsigned int hw_plane_num;
unsigned int stream_id;
unsigned int crtc_grp_id;
unsigned int syncmode_min_prate;
unsigned int singlemode_max_width;
unsigned int master_stream_id;
int vbl_irq;
int safety_shdld_irq;
int content_shdld_irq;
int dec_shdld_irq;
bool aux_is_master;
struct completion safety_shdld_done;
struct completion content_shdld_done;
struct completion dec_shdld_done;
struct drm_pending_vblank_event *event;
};
struct dpu_crtc_state {
struct imx_crtc_state imx_crtc_state;
struct dpu_plane_state **dpu_plane_states;
bool use_pc;
};
static inline struct dpu_crtc_state *to_dpu_crtc_state(struct imx_crtc_state *s)
{
return container_of(s, struct dpu_crtc_state, imx_crtc_state);
}
static inline struct dpu_crtc *to_dpu_crtc(struct drm_crtc *crtc)
{
return container_of(crtc, struct dpu_crtc, base);
}
struct dpu_plane_state **
crtc_state_get_dpu_plane_states(struct drm_crtc_state *state);
#endif

@ -0,0 +1,726 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/sort.h>
#include <video/dpu.h>
#include "dpu-crtc.h"
#include "dpu-plane.h"
#include "../imx-drm.h"
static struct drm_plane_state **
dpu_atomic_alloc_tmp_planes_per_crtc(struct drm_device *dev)
{
int total_planes = dev->mode_config.num_total_plane;
struct drm_plane_state **states;
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
return ERR_PTR(-ENOMEM);
return states;
}
static int zpos_cmp(const void *a, const void *b)
{
const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
return sa->normalized_zpos - sb->normalized_zpos;
}
static int dpu_atomic_sort_planes_per_crtc(struct drm_crtc_state *crtc_state,
struct drm_plane_state **states)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_device *dev = state->dev;
struct drm_plane *plane;
int n = 0;
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
states[n++] = plane_state;
}
sort(states, n, sizeof(*states), zpos_cmp, NULL);
return n;
}
static void
dpu_atomic_compute_plane_lrx_per_crtc(struct drm_crtc_state *crtc_state,
struct drm_plane_state **states, int n)
{
struct dpu_plane_state *dpstate;
struct drm_plane_state *plane_state;
int i;
int half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1;
bool lo, ro, bo;
/* compute left/right_crtc_x if pixel combiner is needed */
for (i = 0; i < n; i++) {
plane_state = states[i];
dpstate = to_dpu_plane_state(plane_state);
lo = dpstate->left_src_w && !dpstate->right_src_w;
ro = !dpstate->left_src_w && dpstate->right_src_w;
bo = dpstate->left_src_w && dpstate->right_src_w;
if (lo || bo) {
dpstate->left_crtc_x = plane_state->crtc_x;
dpstate->right_crtc_x = 0;
} else if (ro) {
dpstate->left_crtc_x = 0;
dpstate->right_crtc_x =
plane_state->crtc_x - half_hdisplay;
}
}
}
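/*
 * Walking from top to bottom, flag the top-most plane of the stream; when
 * the pixel combiner is used, the left and right halves are flagged
 * separately.
 */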
static void
dpu_atomic_set_top_plane_per_crtc(struct drm_plane_state **states, int n,
bool use_pc)
{
struct dpu_plane_state *dpstate;
bool found_l_top = false, found_r_top = false;
int i;
for (i = n - 1; i >= 0; i--) {
dpstate = to_dpu_plane_state(states[i]);
if (use_pc) {
if (dpstate->left_src_w && !found_l_top) {
dpstate->is_left_top = true;
found_l_top = true;
} else {
dpstate->is_left_top = false;
}
if (dpstate->right_src_w && !found_r_top) {
dpstate->is_right_top = true;
found_r_top = true;
} else {
dpstate->is_right_top = false;
}
} else {
dpstate->is_top = (i == (n - 1)) ? true : false;
}
}
}
static int
dpu_atomic_assign_plane_source_per_crtc(struct drm_plane_state **states,
int n, bool use_pc)
{
struct dpu_plane_state *dpstate;
struct dpu_plane *dplane;
struct dpu_plane_grp *grp;
struct drm_framebuffer *fb;
struct dpu_fetchunit *fu;
struct dpu_fetchunit *fe;
struct dpu_hscaler *hs;
struct dpu_vscaler *vs;
lb_prim_sel_t stage;
dpu_block_id_t blend;
unsigned int sid, src_sid;
unsigned int num_planes;
int bit;
int i, j, k = 0, m;
int total_asrc_num;
int s0_layer_cnt = 0, s1_layer_cnt = 0;
int s0_n = 0, s1_n = 0;
u32 src_a_mask, cap_mask, fe_mask, hs_mask, vs_mask;
bool need_fetcheco, need_hscaler, need_vscaler;
bool fmt_is_yuv;
bool alloc_aux_source;
if (use_pc) {
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
if (dpstate->left_src_w)
s0_n++;
if (dpstate->right_src_w)
s1_n++;
}
} else {
s0_n = n;
s1_n = n;
}
/* for active planes only */
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
dplane = to_dpu_plane(states[i]->plane);
fb = states[i]->fb;
num_planes = fb->format->num_planes;
fmt_is_yuv = drm_format_is_yuv(fb->format->format);
grp = dplane->grp;
alloc_aux_source = false;
if (use_pc)
sid = dpstate->left_src_w ? 0 : 1;
else
sid = dplane->stream_id;
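/*
 * A plane that spans both halves of a pixel-combined display needs a
 * second (auxiliary) fetch source on the other stream; once the first
 * source is assigned, the code jumps back to the label below with the
 * stream id toggled to assign it.
 */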
again:
if (alloc_aux_source)
sid ^= 1;
need_fetcheco = (num_planes > 1);
need_hscaler = (states[i]->src_w >> 16 != states[i]->crtc_w);
need_vscaler = (states[i]->src_h >> 16 != states[i]->crtc_h);
total_asrc_num = 0;
src_a_mask = grp->src_a_mask;
fe_mask = 0;
hs_mask = 0;
vs_mask = 0;
for_each_set_bit(bit, (unsigned long *)&src_a_mask, 32)
total_asrc_num++;
/* assign source */
mutex_lock(&grp->mutex);
for (j = 0; j < total_asrc_num; j++) {
k = ffs(src_a_mask) - 1;
if (k < 0) {
mutex_unlock(&grp->mutex);
return -EINVAL;
}
fu = source_to_fu(&grp->res, sources[k]);
if (!fu) {
mutex_unlock(&grp->mutex);
return -EINVAL;
}
/* avoid on-the-fly/hot migration */
src_sid = fu->ops->get_stream_id(fu);
if (src_sid && src_sid != BIT(sid))
goto next;
if (fetchunit_is_fetchdecode(fu)) {
cap_mask = fetchdecode_get_vproc_mask(fu);
if (need_fetcheco) {
fe = fetchdecode_get_fetcheco(fu);
/* avoid on-the-fly/hot migration */
src_sid = fu->ops->get_stream_id(fe);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the fetcheco cap? */
if (!dpu_vproc_has_fetcheco_cap(cap_mask))
goto next;
fe_mask =
dpu_vproc_get_fetcheco_cap(cap_mask);
/* fetcheco available? */
if (grp->src_use_vproc_mask & fe_mask)
goto next;
}
if (need_hscaler) {
hs = fetchdecode_get_hscaler(fu);
/* avoid on-the-fly/hot migration */
src_sid = hscaler_get_stream_id(hs);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the hscale cap */
if (!dpu_vproc_has_hscale_cap(cap_mask))
goto next;
hs_mask =
dpu_vproc_get_hscale_cap(cap_mask);
/* hscaler available? */
if (grp->src_use_vproc_mask & hs_mask)
goto next;
}
if (need_vscaler) {
vs = fetchdecode_get_vscaler(fu);
/* avoid on-the-fly/hot migration */
src_sid = vscaler_get_stream_id(vs);
if (src_sid && src_sid != BIT(sid))
goto next;
/* fetch unit has the vscale cap? */
if (!dpu_vproc_has_vscale_cap(cap_mask))
goto next;
vs_mask =
dpu_vproc_get_vscale_cap(cap_mask);
/* vscaler available? */
if (grp->src_use_vproc_mask & vs_mask)
goto next;
}
} else {
if (fmt_is_yuv || need_fetcheco ||
need_hscaler || need_vscaler)
goto next;
}
grp->src_a_mask &= ~BIT(k);
grp->src_use_vproc_mask |= fe_mask | hs_mask | vs_mask;
break;
next:
src_a_mask &= ~BIT(k);
fe_mask = 0;
hs_mask = 0;
vs_mask = 0;
}
mutex_unlock(&grp->mutex);
if (j == total_asrc_num)
return -EINVAL;
if (alloc_aux_source)
dpstate->aux_source = sources[k];
else
dpstate->source = sources[k];
/* assign stage and blend */
if (sid) {
m = grp->hw_plane_num - (s1_n - s1_layer_cnt);
stage = s1_layer_cnt ? stages[m - 1] : cf_stages[sid];
blend = blends[m];
s1_layer_cnt++;
} else {
stage = s0_layer_cnt ?
stages[s0_layer_cnt - 1] : cf_stages[sid];
blend = blends[s0_layer_cnt];
s0_layer_cnt++;
}
if (alloc_aux_source) {
dpstate->aux_stage = stage;
dpstate->aux_blend = blend;
} else {
dpstate->stage = stage;
dpstate->blend = blend;
}
if (dpstate->need_aux_source && !alloc_aux_source) {
alloc_aux_source = true;
goto again;
}
}
return 0;
}
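/*
 * Mark a CRTC whose states may later be dropped from this commit: it is
 * not part of the new atomic state and none of the planes in the state
 * can possibly be assigned to it.
 */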
static void
dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(struct drm_crtc *crtc,
u32 crtc_mask,
struct drm_atomic_state *state,
bool *puts)
{
struct drm_plane *plane;
struct drm_plane_state *plane_state;
bool found_pstate = false;
int i;
if ((crtc_mask & drm_crtc_mask(crtc)) == 0) {
for_each_new_plane_in_state(state, plane, plane_state, i) {
if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
found_pstate = true;
break;
}
}
if (!found_pstate)
puts[drm_crtc_index(crtc)] = true;
}
}
static void
dpu_atomic_put_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane)
{
int index = drm_plane_index(plane);
plane->funcs->atomic_destroy_state(plane, state->planes[index].state);
state->planes[index].ptr = NULL;
state->planes[index].state = NULL;
drm_modeset_unlock(&plane->mutex);
}
static void
dpu_atomic_put_crtc_state(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
int index = drm_crtc_index(crtc);
crtc->funcs->atomic_destroy_state(crtc, state->crtcs[index].state);
state->crtcs[index].ptr = NULL;
state->crtcs[index].state = NULL;
drm_modeset_unlock(&crtc->mutex);
}
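/*
 * Drop the plane and CRTC states of such a pipe from the atomic state
 * when the newly assigned hardware resources (source, stage, blend)
 * match the old ones, so the commit does not touch that pipe.
 */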
static void
dpu_atomic_put_possible_states_per_crtc(struct drm_crtc_state *crtc_state)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_crtc_state *old_crtc_state = crtc->state;
struct drm_plane *plane;
struct drm_plane_state *plane_state;
struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
struct dpu_plane_state **old_dpstates;
struct dpu_plane_state *old_dpstate, *new_dpstate;
u32 active_mask = 0;
int i;
old_dpstates = crtc_state_get_dpu_plane_states(old_crtc_state);
if (WARN_ON(!old_dpstates))
return;
for (i = 0; i < dplane->grp->hw_plane_num; i++) {
old_dpstate = old_dpstates[i];
if (!old_dpstate)
continue;
active_mask |= BIT(i);
drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
if (drm_plane_index(plane) !=
drm_plane_index(old_dpstate->base.plane))
continue;
plane_state =
drm_atomic_get_existing_plane_state(state,
plane);
if (WARN_ON(!plane_state))
return;
new_dpstate = to_dpu_plane_state(plane_state);
active_mask &= ~BIT(i);
/*
* Should be enough to check the below real HW plane
* resources only.
* Things like vproc resources should be fine.
*/
if (old_dpstate->stage != new_dpstate->stage ||
old_dpstate->source != new_dpstate->source ||
old_dpstate->blend != new_dpstate->blend ||
old_dpstate->aux_stage != new_dpstate->aux_stage ||
old_dpstate->aux_source != new_dpstate->aux_source ||
old_dpstate->aux_blend != new_dpstate->aux_blend)
return;
}
}
/* pure software check */
if (WARN_ON(active_mask))
return;
drm_atomic_crtc_state_for_each_plane(plane, crtc_state)
dpu_atomic_put_plane_state(state, plane);
dpu_atomic_put_crtc_state(state, crtc);
}
static int dpu_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct dpu_plane *dpu_plane;
struct drm_plane_state *plane_state;
struct dpu_plane_state *dpstate;
struct drm_framebuffer *fb;
struct dpu_plane_grp *grp[MAX_DPU_PLANE_GRP];
int ret, i, grp_id;
int active_plane[MAX_DPU_PLANE_GRP];
int active_plane_fetcheco[MAX_DPU_PLANE_GRP];
int active_plane_hscale[MAX_DPU_PLANE_GRP];
int active_plane_vscale[MAX_DPU_PLANE_GRP];
int half_hdisplay = 0;
bool pipe_states_prone_to_put[MAX_CRTC];
bool use_pc[MAX_DPU_PLANE_GRP];
u32 crtc_mask_in_state = 0;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret) {
DRM_DEBUG_KMS("%s: failed to check modeset\n", __func__);
return ret;
}
for (i = 0; i < MAX_CRTC; i++)
pipe_states_prone_to_put[i] = false;
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
active_plane[i] = 0;
active_plane_fetcheco[i] = 0;
active_plane_hscale[i] = 0;
active_plane_vscale[i] = 0;
use_pc[i] = false;
grp[i] = NULL;
}
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
crtc_mask_in_state |= drm_crtc_mask(crtc);
drm_for_each_crtc(crtc, dev) {
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct imx_crtc_state *imx_crtc_state;
struct dpu_crtc_state *dcstate;
bool need_left, need_right, need_aux_source, use_pc_per_crtc;
use_pc_per_crtc = false;
dpu_atomic_mark_pipe_states_prone_to_put_per_crtc(crtc,
crtc_mask_in_state, state,
pipe_states_prone_to_put);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (WARN_ON(IS_ERR(crtc_state)))
return PTR_ERR(crtc_state);
imx_crtc_state = to_imx_crtc_state(crtc_state);
dcstate = to_dpu_crtc_state(imx_crtc_state);
if (crtc_state->enable) {
if (use_pc[dpu_crtc->crtc_grp_id]) {
DRM_DEBUG_KMS("other crtc needs pixel combiner\n");
return -EINVAL;
}
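/*
 * Modes whose pixel clock or active width exceeds what a single DPU
 * stream can drive are split into left/right halves via the pixel
 * combiner.
 */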
if (crtc_state->adjusted_mode.clock >
dpu_crtc->syncmode_min_prate ||
crtc_state->adjusted_mode.hdisplay >
dpu_crtc->singlemode_max_width)
use_pc_per_crtc = true;
}
if (use_pc_per_crtc) {
use_pc[dpu_crtc->crtc_grp_id] = true;
half_hdisplay = crtc_state->adjusted_mode.hdisplay >> 1;
}
dcstate->use_pc = use_pc_per_crtc;
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
DRM_DEBUG_KMS("failed to get plane state\n");
return PTR_ERR(plane_state);
}
dpstate = to_dpu_plane_state(plane_state);
fb = plane_state->fb;
dpu_plane = to_dpu_plane(plane);
grp_id = dpu_plane->grp->id;
active_plane[grp_id]++;
need_left = false;
need_right = false;
need_aux_source = false;
if (use_pc_per_crtc) {
if (plane_state->crtc_x < half_hdisplay)
need_left = true;
if ((plane_state->crtc_w +
plane_state->crtc_x) > half_hdisplay)
need_right = true;
if (need_left && need_right) {
need_aux_source = true;
active_plane[grp_id]++;
}
}
if (need_left && need_right) {
dpstate->left_crtc_w = half_hdisplay;
dpstate->left_crtc_w -= plane_state->crtc_x;
dpstate->left_src_w = dpstate->left_crtc_w;
} else if (need_left) {
dpstate->left_crtc_w = plane_state->crtc_w;
dpstate->left_src_w = plane_state->src_w >> 16;
} else {
dpstate->left_crtc_w = 0;
dpstate->left_src_w = 0;
}
if (need_right && need_left) {
dpstate->right_crtc_w = plane_state->crtc_x +
plane_state->crtc_w;
dpstate->right_crtc_w -= half_hdisplay;
dpstate->right_src_w = dpstate->right_crtc_w;
} else if (need_right) {
dpstate->right_crtc_w = plane_state->crtc_w;
dpstate->right_src_w = plane_state->src_w >> 16;
} else {
dpstate->right_crtc_w = 0;
dpstate->right_src_w = 0;
}
if (fb->format->num_planes > 1) {
active_plane_fetcheco[grp_id]++;
if (need_aux_source)
active_plane_fetcheco[grp_id]++;
}
if (plane_state->src_w >> 16 != plane_state->crtc_w) {
if (use_pc_per_crtc)
return -EINVAL;
active_plane_hscale[grp_id]++;
}
if (plane_state->src_h >> 16 != plane_state->crtc_h) {
if (use_pc_per_crtc)
return -EINVAL;
active_plane_vscale[grp_id]++;
}
if (grp[grp_id] == NULL)
grp[grp_id] = dpu_plane->grp;
dpstate->need_aux_source = need_aux_source;
}
}
/* enough resources? */
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
if (!grp[i])
continue;
if (active_plane[i] > grp[i]->hw_plane_num) {
DRM_DEBUG_KMS("not enough fetch units\n");
return -EINVAL;
}
if (active_plane_fetcheco[i] > grp[i]->hw_plane_fetcheco_num) {
DRM_DEBUG_KMS("not enough FetchEcos\n");
return -EINVAL;
}
if (active_plane_hscale[i] > grp[i]->hw_plane_hscaler_num) {
DRM_DEBUG_KMS("not enough Hscalers\n");
return -EINVAL;
}
if (active_plane_vscale[i] > grp[i]->hw_plane_vscaler_num) {
DRM_DEBUG_KMS("not enough Vscalers\n");
return -EINVAL;
}
}
/* initialize resource mask */
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
if (!grp[i])
continue;
mutex_lock(&grp[i]->mutex);
grp[i]->src_a_mask = grp[i]->src_mask;
grp[i]->src_use_vproc_mask = 0;
mutex_unlock(&grp[i]->mutex);
}
ret = drm_atomic_normalize_zpos(dev, state);
if (ret)
return ret;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_plane_state **states;
int n;
states = dpu_atomic_alloc_tmp_planes_per_crtc(dev);
if (IS_ERR(states)) {
DRM_DEBUG_KMS(
"[CRTC:%d:%s] cannot alloc plane state ptrs\n",
crtc->base.id, crtc->name);
return PTR_ERR(states);
}
n = dpu_atomic_sort_planes_per_crtc(crtc_state, states);
if (n < 0) {
DRM_DEBUG_KMS("[CRTC:%d:%s] failed to sort planes\n",
crtc->base.id, crtc->name);
kfree(states);
return n;
}
/* no active planes? */
if (n == 0) {
kfree(states);
continue;
}
if (use_pc[dpu_crtc->crtc_grp_id])
dpu_atomic_compute_plane_lrx_per_crtc(crtc_state,
states, n);
dpu_atomic_set_top_plane_per_crtc(states, n,
use_pc[dpu_crtc->crtc_grp_id]);
ret = dpu_atomic_assign_plane_source_per_crtc(states, n,
use_pc[dpu_crtc->crtc_grp_id]);
if (ret) {
DRM_DEBUG_KMS("[CRTC:%d:%s] cannot assign plane rscs\n",
crtc->base.id, crtc->name);
kfree(states);
return ret;
}
kfree(states);
}
drm_for_each_crtc(crtc, dev) {
if (pipe_states_prone_to_put[drm_crtc_index(crtc)]) {
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (WARN_ON(IS_ERR(crtc_state)))
return PTR_ERR(crtc_state);
dpu_atomic_put_possible_states_per_crtc(crtc_state);
}
}
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) {
DRM_DEBUG_KMS("%s: failed to check planes\n", __func__);
return ret;
}
return ret;
}
const struct drm_mode_config_funcs dpu_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = dpu_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};

@ -0,0 +1,20 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef _DPU_KMS_H_
#define _DPU_KMS_H_
extern const struct drm_mode_config_funcs dpu_drm_mode_config_funcs;
#endif

File diff suppressed because it is too large

@ -0,0 +1,195 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef __DPU_PLANE_H__
#define __DPU_PLANE_H__
#include <video/dpu.h>
#include "../imx-drm.h"
#define MAX_DPU_PLANE_GRP (MAX_CRTC / 2)
enum dpu_plane_src_type {
DPU_PLANE_SRC_FL,
DPU_PLANE_SRC_FW,
DPU_PLANE_SRC_FD,
};
struct dpu_plane {
struct drm_plane base;
struct dpu_plane_grp *grp;
struct list_head head;
unsigned int stream_id;
};
struct dpu_plane_state {
struct drm_plane_state base;
lb_prim_sel_t stage;
lb_sec_sel_t source;
dpu_block_id_t blend;
lb_prim_sel_t aux_stage;
lb_sec_sel_t aux_source;
dpu_block_id_t aux_blend;
bool is_top;
bool use_prefetch;
bool use_aux_prefetch;
bool need_aux_source;
/* used when pixel combiner is needed */
unsigned int left_src_w;
unsigned int left_crtc_w;
unsigned int left_crtc_x;
unsigned int right_src_w;
unsigned int right_crtc_w;
unsigned int right_crtc_x;
bool is_left_top;
bool is_right_top;
};
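/*
 * Static tables describing the blend chain: a plane's primary input
 * (stage) is either a ConstFrame (for the bottom-most plane) or the
 * previous LayerBlend, its secondary input (source) is the fetch unit
 * feeding the plane, and blend selects the LayerBlend unit itself.
 */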
static const lb_prim_sel_t cf_stages[] = {LB_PRIM_SEL__CONSTFRAME0,
LB_PRIM_SEL__CONSTFRAME1};
static const lb_prim_sel_t stages[] = {LB_PRIM_SEL__LAYERBLEND0,
LB_PRIM_SEL__LAYERBLEND1,
LB_PRIM_SEL__LAYERBLEND2,
LB_PRIM_SEL__LAYERBLEND3};
/* TODO: Add source entries for subsidiary layers. */
static const lb_sec_sel_t sources[] = {LB_SEC_SEL__FETCHLAYER0,
LB_SEC_SEL__FETCHWARP2,
LB_SEC_SEL__FETCHDECODE0,
LB_SEC_SEL__FETCHDECODE1};
static const dpu_block_id_t blends[] = {ID_LAYERBLEND0, ID_LAYERBLEND1,
ID_LAYERBLEND2, ID_LAYERBLEND3};
static inline struct dpu_plane *to_dpu_plane(struct drm_plane *plane)
{
return container_of(plane, struct dpu_plane, base);
}
static inline struct dpu_plane_state *
to_dpu_plane_state(struct drm_plane_state *plane_state)
{
return container_of(plane_state, struct dpu_plane_state, base);
}
static inline int source_to_type(lb_sec_sel_t source)
{
switch (source) {
case LB_SEC_SEL__FETCHLAYER0:
return DPU_PLANE_SRC_FL;
case LB_SEC_SEL__FETCHWARP2:
return DPU_PLANE_SRC_FW;
case LB_SEC_SEL__FETCHDECODE0:
case LB_SEC_SEL__FETCHDECODE1:
return DPU_PLANE_SRC_FD;
default:
break;
}
WARN_ON(1);
return -EINVAL;
}
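/* Map a source selector to a zero-based index within its fetch unit type,
 * e.g. LB_SEC_SEL__FETCHDECODE1 maps to FetchDecode index 1.
 */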
static inline int source_to_id(lb_sec_sel_t source)
{
int i, offset = 0;
int type = source_to_type(source);
for (i = 0; i < ARRAY_SIZE(sources); i++) {
if (source != sources[i])
continue;
/* FetchLayer */
if (type == DPU_PLANE_SRC_FL)
return i;
/* FetchWarp or FetchDecode */
while (offset < ARRAY_SIZE(sources)) {
if (source_to_type(sources[offset]) == type)
break;
offset++;
}
return i - offset;
}
WARN_ON(1);
return -EINVAL;
}
static inline struct dpu_fetchunit *
source_to_fu(struct dpu_plane_res *res, lb_sec_sel_t source)
{
int fu_type = source_to_type(source);
int fu_id = source_to_id(source);
if (fu_type < 0 || fu_id < 0)
return NULL;
switch (fu_type) {
case DPU_PLANE_SRC_FD:
return res->fd[fu_id];
case DPU_PLANE_SRC_FL:
return res->fl[fu_id];
case DPU_PLANE_SRC_FW:
return res->fw[fu_id];
}
return NULL;
}
static inline struct dpu_fetchunit *
dpstate_to_fu(struct dpu_plane_state *dpstate)
{
struct drm_plane *plane = dpstate->base.plane;
struct dpu_plane *dplane = to_dpu_plane(plane);
struct dpu_plane_res *res = &dplane->grp->res;
return source_to_fu(res, dpstate->source);
}
static inline int blend_to_id(dpu_block_id_t blend)
{
int i;
for (i = 0; i < ARRAY_SIZE(blends); i++) {
if (blend == blends[i])
return i;
}
WARN_ON(1);
return -EINVAL;
}
static inline bool drm_format_is_yuv(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
return true;
default:
break;
}
return false;
}
struct dpu_plane *dpu_plane_create(struct drm_device *drm,
unsigned int possible_crtcs,
unsigned int stream_id,
struct dpu_plane_grp *grp,
enum drm_plane_type type);
#endif

@ -343,23 +343,7 @@ static struct platform_driver imx_drm_pdrv = {
.of_match_table = imx_drm_dt_ids,
},
};
static struct platform_driver * const drivers[] = {
&imx_drm_pdrv,
&ipu_drm_driver,
};
static int __init imx_drm_init(void)
{
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(imx_drm_init);
static void __exit imx_drm_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(imx_drm_exit);
module_platform_driver(imx_drm_pdrv);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX drm driver core");

@ -28,8 +28,6 @@ int imx_drm_init_drm(struct platform_device *pdev,
int preferred_bpp);
int imx_drm_exit_drm(void);
extern struct platform_driver ipu_drm_driver;
void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);

@ -492,10 +492,16 @@ static int ipu_drm_remove(struct platform_device *pdev)
return 0;
}
struct platform_driver ipu_drm_driver = {
static struct platform_driver ipu_drm_driver = {
.driver = {
.name = "imx-ipuv3-crtc",
},
.probe = ipu_drm_probe,
.remove = ipu_drm_remove,
};
module_platform_driver(ipu_drm_driver);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-ipuv3-crtc");

drivers/gpu/imx/Kconfig (new file)
@ -0,0 +1,2 @@
source "drivers/gpu/imx/ipu-v3/Kconfig"
source "drivers/gpu/imx/dpu/Kconfig"

drivers/gpu/imx/Makefile (new file)
@ -0,0 +1,2 @@
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
obj-$(CONFIG_IMX_DPU_CORE) += dpu/

@ -0,0 +1,8 @@
config IMX_DPU_CORE
tristate "i.MX DPU core support"
depends on ARCH_MXC
select GENERIC_IRQ_CHIP
help
Choose this if you have a Freescale i.MX8QM or i.MX8QXP system and
want to use the Display Processing Unit. This option only enables
DPU base support.

@ -0,0 +1,7 @@
obj-$(CONFIG_IMX_DPU_CORE) += imx-dpu-core.o
imx-dpu-core-objs := dpu-common.o dpu-constframe.o dpu-disengcfg.o \
dpu-extdst.o dpu-fetchdecode.o dpu-fetcheco.o \
dpu-fetchlayer.o dpu-fetchwarp.o dpu-fetchunit.o \
dpu-framegen.o dpu-hscaler.o dpu-layerblend.o \
dpu-sc-misc.o dpu-store.o dpu-tcon.o dpu-vscaler.o

File diff suppressed because it is too large

@ -0,0 +1,253 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
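/*
 * Module parameter for the safety stream constant color (defined below).
 * Illustrative usage, not part of this patch: an opaque red could be set
 * at load time with
 *     modprobe imx-dpu-core safety_stream_cf_color=0xFF0000FF
 * The value is written verbatim to the ConstFrame CONSTANTCOLOR register
 * of the safety stream ConstFrames (RED 31:24, GREEN 23:16, BLUE 15:8,
 * ALPHA 7:0).
 */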
static unsigned int safety_stream_cf_color = 0x0;
module_param(safety_stream_cf_color, uint, 0444);
MODULE_PARM_DESC(safety_stream_cf_color,
"Safety stream constframe color in hex(0xRRGGBBAA) [default=0x00000000]");
#define FRAMEDIMENSIONS 0xC
#define WIDTH(w) (((w) - 1) & 0x3FFF)
#define HEIGHT(h) ((((h) - 1) & 0x3FFF) << 16)
#define CONSTANTCOLOR 0x10
#define RED(r) (((r) & 0xFF) << 24)
#define GREEN(g) (((g) & 0xFF) << 16)
#define BLUE(b) (((b) & 0xFF) << 8)
#define ALPHA(a) ((a) & 0xFF)
#define CONTROLTRIGGER 0x14
#define START 0x18
#define STATUS 0x1C
struct dpu_constframe {
void __iomem *pec_base;
void __iomem *base;
struct mutex mutex;
int id;
bool inuse;
struct dpu_soc *dpu;
};
static inline u32 dpu_cf_read(struct dpu_constframe *cf, unsigned int offset)
{
return readl(cf->base + offset);
}
static inline void dpu_cf_write(struct dpu_constframe *cf,
unsigned int offset, u32 value)
{
writel(value, cf->base + offset);
}
void constframe_shden(struct dpu_constframe *cf, bool enable)
{
u32 val;
val = enable ? SHDEN : 0;
mutex_lock(&cf->mutex);
dpu_cf_write(cf, STATICCONTROL, val);
mutex_unlock(&cf->mutex);
}
EXPORT_SYMBOL_GPL(constframe_shden);
void constframe_framedimensions(struct dpu_constframe *cf, unsigned int w,
unsigned int h)
{
u32 val;
val = WIDTH(w) | HEIGHT(h);
mutex_lock(&cf->mutex);
dpu_cf_write(cf, FRAMEDIMENSIONS, val);
mutex_unlock(&cf->mutex);
}
EXPORT_SYMBOL_GPL(constframe_framedimensions);
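/*
 * ConstFrames 0 and 1 are secondary units: they mirror the frame
 * dimensions programmed into their primary peers, ConstFrames 4 and 5,
 * which belong to the safety stream of the same display.
 */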
void constframe_framedimensions_copy_prim(struct dpu_constframe *cf)
{
struct dpu_constframe *prim_cf = NULL;
unsigned int prim_id;
int i;
u32 val;
if (cf->id != 0 && cf->id != 1) {
dev_warn(cf->dpu->dev, "ConstFrame%d is not a secondary one\n",
cf->id);
return;
}
prim_id = cf->id + 4;
for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
if (cf_ids[i] == prim_id)
prim_cf = cf->dpu->cf_priv[i];
if (!prim_cf) {
dev_warn(cf->dpu->dev, "cannot find ConstFrame%d's primary peer\n",
cf->id);
return;
}
mutex_lock(&cf->mutex);
val = dpu_cf_read(prim_cf, FRAMEDIMENSIONS);
dpu_cf_write(cf, FRAMEDIMENSIONS, val);
mutex_unlock(&cf->mutex);
}
EXPORT_SYMBOL_GPL(constframe_framedimensions_copy_prim);
void constframe_constantcolor(struct dpu_constframe *cf, unsigned int r,
unsigned int g, unsigned int b, unsigned int a)
{
u32 val;
val = RED(r) | GREEN(g) | BLUE(b) | ALPHA(a);
mutex_lock(&cf->mutex);
dpu_cf_write(cf, CONSTANTCOLOR, val);
mutex_unlock(&cf->mutex);
}
EXPORT_SYMBOL_GPL(constframe_constantcolor);
void constframe_controltrigger(struct dpu_constframe *cf, bool trigger)
{
u32 val;
val = trigger ? SHDTOKGEN : 0;
mutex_lock(&cf->mutex);
dpu_cf_write(cf, CONTROLTRIGGER, val);
mutex_unlock(&cf->mutex);
}
EXPORT_SYMBOL_GPL(constframe_controltrigger);
struct dpu_constframe *dpu_cf_get(struct dpu_soc *dpu, int id)
{
struct dpu_constframe *cf;
int i;
for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
if (cf_ids[i] == id)
break;
if (i == ARRAY_SIZE(cf_ids))
return ERR_PTR(-EINVAL);
cf = dpu->cf_priv[i];
mutex_lock(&cf->mutex);
if (cf->inuse) {
mutex_unlock(&cf->mutex);
return ERR_PTR(-EBUSY);
}
cf->inuse = true;
mutex_unlock(&cf->mutex);
return cf;
}
EXPORT_SYMBOL_GPL(dpu_cf_get);
void dpu_cf_put(struct dpu_constframe *cf)
{
mutex_lock(&cf->mutex);
cf->inuse = false;
mutex_unlock(&cf->mutex);
}
EXPORT_SYMBOL_GPL(dpu_cf_put);
struct dpu_constframe *dpu_aux_cf_peek(struct dpu_constframe *cf)
{
unsigned int aux_id = cf->id ^ 1;
int i;
for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
if (cf_ids[i] == aux_id)
return cf->dpu->cf_priv[i];
return NULL;
}
EXPORT_SYMBOL_GPL(dpu_aux_cf_peek);
void _dpu_cf_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_constframe *cf;
int i;
for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
if (cf_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(cf_ids)))
return;
cf = dpu->cf_priv[i];
constframe_shden(cf, true);
if (id == 4 || id == 5) {
mutex_lock(&cf->mutex);
dpu_cf_write(cf, CONSTANTCOLOR, safety_stream_cf_color);
mutex_unlock(&cf->mutex);
}
}
int dpu_cf_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_constframe *cf;
int i;
cf = devm_kzalloc(dpu->dev, sizeof(*cf), GFP_KERNEL);
if (!cf)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(cf_ids); i++)
if (cf_ids[i] == id)
break;
if (i == ARRAY_SIZE(cf_ids))
return -EINVAL;
dpu->cf_priv[i] = cf;
cf->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
if (!cf->pec_base)
return -ENOMEM;
cf->base = devm_ioremap(dpu->dev, base, SZ_32);
if (!cf->base)
return -ENOMEM;
cf->dpu = dpu;
cf->id = id;
mutex_init(&cf->mutex);
_dpu_cf_init(dpu, id);
return 0;
}

@ -0,0 +1,142 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drm_mode.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include "dpu-prv.h"
#define CLOCKCTRL 0x8
typedef enum {
DSPCLKDIVIDE__DIV1, /* Ext disp clk signal has pix clk freq. */
DSPCLKDIVIDE__DIV2, /* Ext disp clk signal has 2x the pix clk freq. */
} clkdivide_t;
#define POLARITYCTRL 0xC
#define POLHS_HIGH BIT(0)
#define POLVS_HIGH BIT(1)
#define POLEN_HIGH BIT(2)
#define PIXINV_INV BIT(3)
#define SRCSELECT 0x10
struct dpu_disengcfg {
void __iomem *base;
struct mutex mutex;
int id;
bool inuse;
struct dpu_soc *dpu;
};
static inline u32 dpu_dec_read(struct dpu_disengcfg *dec, unsigned int offset)
{
return readl(dec->base + offset);
}
static inline void dpu_dec_write(struct dpu_disengcfg *dec,
unsigned int offset, u32 value)
{
writel(value, dec->base + offset);
}
struct dpu_disengcfg *dpu_dec_get(struct dpu_soc *dpu, int id)
{
struct dpu_disengcfg *dec;
int i;
for (i = 0; i < ARRAY_SIZE(dec_ids); i++)
if (dec_ids[i] == id)
break;
if (i == ARRAY_SIZE(dec_ids))
return ERR_PTR(-EINVAL);
dec = dpu->dec_priv[i];
mutex_lock(&dec->mutex);
if (dec->inuse) {
mutex_unlock(&dec->mutex);
return ERR_PTR(-EBUSY);
}
dec->inuse = true;
mutex_unlock(&dec->mutex);
return dec;
}
EXPORT_SYMBOL_GPL(dpu_dec_get);
void dpu_dec_put(struct dpu_disengcfg *dec)
{
mutex_lock(&dec->mutex);
dec->inuse = false;
mutex_unlock(&dec->mutex);
}
EXPORT_SYMBOL_GPL(dpu_dec_put);
struct dpu_disengcfg *dpu_aux_dec_peek(struct dpu_disengcfg *dec)
{
return dec->dpu->dec_priv[dec->id ^ 1];
}
EXPORT_SYMBOL_GPL(dpu_aux_dec_peek);
void _dpu_dec_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_disengcfg *dec;
u32 val;
int i;
for (i = 0; i < ARRAY_SIZE(dec_ids); i++)
if (dec_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(dec_ids)))
return;
dec = dpu->dec_priv[i];
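/* default both HSYNC and VSYNC polarity to active low */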
val = dpu_dec_read(dec, POLARITYCTRL);
val &= ~POLHS_HIGH;
val &= ~POLVS_HIGH;
dpu_dec_write(dec, POLARITYCTRL, val);
}
int dpu_dec_init(struct dpu_soc *dpu, unsigned int id,
unsigned long unused, unsigned long base)
{
struct dpu_disengcfg *dec;
dec = devm_kzalloc(dpu->dev, sizeof(*dec), GFP_KERNEL);
if (!dec)
return -ENOMEM;
dpu->dec_priv[id] = dec;
dec->base = devm_ioremap(dpu->dev, base, SZ_16);
if (!dec->base)
return -ENOMEM;
dec->dpu = dpu;
dec->id = id;
mutex_init(&dec->mutex);
_dpu_dec_init(dpu, id);
return 0;
}

@ -0,0 +1,521 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define PIXENGCFG_STATIC 0x8
#define POWERDOWN BIT(4)
#define SYNC_MODE BIT(8)
#define SW_RESET BIT(11)
#define DIV(n) (((n) & 0xFF) << 16)
#define DIV_RESET 0x80
#define PIXENGCFG_DYNAMIC 0xC
#define PIXENGCFG_REQUEST 0x10
#define SHDLDREQ(n) BIT(n)
#define SEL_SHDLDREQ BIT(0)
#define PIXENGCFG_TRIGGER 0x14
#define SYNC_TRIGGER BIT(0)
#define TRIGGER_SEQUENCE_COMPLETE BIT(4)
#define PIXENGCFG_STATUS 0x18
#define SYNC_BUSY BIT(8)
#define KICK_MODE BIT(8)
#define PERFCOUNTMODE BIT(12)
#define CONTROL 0xC
#define GAMMAAPPLYENABLE BIT(0)
#define SOFTWAREKICK 0x10
#define KICK BIT(0)
#define STATUS 0x14
#define CNT_ERR_STS BIT(0)
#define CONTROLWORD 0x18
#define CURPIXELCNT 0x1C
static u16 get_xval(u32 pixel_cnt)
{
return pixel_cnt & 0xFFFF;
}
static u16 get_yval(u32 pixel_cnt)
{
return pixel_cnt >> 16;
}
#define LASTPIXELCNT 0x20
#define PERFCOUNTER 0x24
struct dpu_extdst {
void __iomem *pec_base;
void __iomem *base;
struct mutex mutex;
int id;
bool inuse;
struct dpu_soc *dpu;
};
static inline u32 dpu_pec_ed_read(struct dpu_extdst *ed, unsigned int offset)
{
return readl(ed->pec_base + offset);
}
static inline void dpu_pec_ed_write(struct dpu_extdst *ed,
unsigned int offset, u32 value)
{
writel(value, ed->pec_base + offset);
}
static inline u32 dpu_ed_read(struct dpu_extdst *ed, unsigned int offset)
{
return readl(ed->base + offset);
}
static inline void dpu_ed_write(struct dpu_extdst *ed,
unsigned int offset, u32 value)
{
writel(value, ed->base + offset);
}
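/* ExtDst 4 and 5 drive the safety streams (cf. the safety ConstFrames 4/5
 * in dpu-constframe.c).
 */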
static inline bool dpu_ed_is_safety_stream(struct dpu_extdst *ed)
{
if (ed->id == 4 || ed->id == 5)
return true;
return false;
}
void extdst_pixengcfg_shden(struct dpu_extdst *ed, bool enable)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
if (enable)
val |= SHDEN;
else
val &= ~SHDEN;
dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_shden);
void extdst_pixengcfg_powerdown(struct dpu_extdst *ed, bool powerdown)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
if (powerdown)
val |= POWERDOWN;
else
val &= ~POWERDOWN;
dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_powerdown);
void extdst_pixengcfg_sync_mode(struct dpu_extdst *ed, ed_sync_mode_t mode)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
if (mode == AUTO)
val |= SYNC_MODE;
else
val &= ~SYNC_MODE;
dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_sync_mode);
void extdst_pixengcfg_reset(struct dpu_extdst *ed, bool reset)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
if (reset)
val |= SW_RESET;
else
val &= ~SW_RESET;
dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_reset);
void extdst_pixengcfg_div(struct dpu_extdst *ed, u16 div)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
val &= ~0xFF0000;
val |= DIV(div);
dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_div);
void extdst_pixengcfg_syncmode_master(struct dpu_extdst *ed, bool enable)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_STATIC);
if (enable)
val |= BIT(16);
else
val &= ~BIT(16);
dpu_pec_ed_write(ed, PIXENGCFG_STATIC, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_syncmode_master);
int extdst_pixengcfg_src_sel(struct dpu_extdst *ed, extdst_src_sel_t src)
{
mutex_lock(&ed->mutex);
dpu_pec_ed_write(ed, PIXENGCFG_DYNAMIC, src);
mutex_unlock(&ed->mutex);
return 0;
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_src_sel);
void extdst_pixengcfg_sel_shdldreq(struct dpu_extdst *ed)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_REQUEST);
val |= SEL_SHDLDREQ;
dpu_pec_ed_write(ed, PIXENGCFG_REQUEST, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_sel_shdldreq);
void extdst_pixengcfg_shdldreq(struct dpu_extdst *ed, u32 req_mask)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_REQUEST);
val |= req_mask;
dpu_pec_ed_write(ed, PIXENGCFG_REQUEST, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_shdldreq);
void extdst_pixengcfg_sync_trigger(struct dpu_extdst *ed)
{
mutex_lock(&ed->mutex);
dpu_pec_ed_write(ed, PIXENGCFG_TRIGGER, SYNC_TRIGGER);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_sync_trigger);
void extdst_pixengcfg_trigger_sequence_complete(struct dpu_extdst *ed)
{
mutex_lock(&ed->mutex);
dpu_pec_ed_write(ed, PIXENGCFG_TRIGGER, TRIGGER_SEQUENCE_COMPLETE);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_trigger_sequence_complete);
bool extdst_pixengcfg_is_sync_busy(struct dpu_extdst *ed)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_STATUS);
mutex_unlock(&ed->mutex);
return val & SYNC_BUSY;
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_is_sync_busy);
ed_pipeline_status_t extdst_pixengcfg_pipeline_status(struct dpu_extdst *ed)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_pec_ed_read(ed, PIXENGCFG_STATUS);
mutex_unlock(&ed->mutex);
return val & 0x3;
}
EXPORT_SYMBOL_GPL(extdst_pixengcfg_pipeline_status);
void extdst_shden(struct dpu_extdst *ed, bool enable)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, STATICCONTROL);
if (enable)
val |= SHDEN;
else
val &= ~SHDEN;
dpu_ed_write(ed, STATICCONTROL, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_shden);
void extdst_kick_mode(struct dpu_extdst *ed, ed_kick_mode_t mode)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, STATICCONTROL);
val &= ~KICK_MODE;
val |= mode;
dpu_ed_write(ed, STATICCONTROL, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_kick_mode);
void extdst_perfcountmode(struct dpu_extdst *ed, bool enable)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, STATICCONTROL);
if (enable)
val |= PERFCOUNTMODE;
else
val &= ~PERFCOUNTMODE;
dpu_ed_write(ed, STATICCONTROL, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_perfcountmode);
void extdst_gamma_apply_enable(struct dpu_extdst *ed, bool enable)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, CONTROL);
if (enable)
val |= GAMMAAPPLYENABLE;
else
val &= ~GAMMAAPPLYENABLE;
dpu_ed_write(ed, CONTROL, val);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_gamma_apply_enable);
void extdst_kick(struct dpu_extdst *ed)
{
mutex_lock(&ed->mutex);
dpu_ed_write(ed, SOFTWAREKICK, KICK);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_kick);
void extdst_cnt_err_clear(struct dpu_extdst *ed)
{
mutex_lock(&ed->mutex);
dpu_ed_write(ed, STATUS, CNT_ERR_STS);
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(extdst_cnt_err_clear);
bool extdst_cnt_err_status(struct dpu_extdst *ed)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, STATUS);
mutex_unlock(&ed->mutex);
return val & CNT_ERR_STS;
}
EXPORT_SYMBOL_GPL(extdst_cnt_err_status);
u32 extdst_last_control_word(struct dpu_extdst *ed)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, CONTROLWORD);
mutex_unlock(&ed->mutex);
return val;
}
EXPORT_SYMBOL_GPL(extdst_last_control_word);
void extdst_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, CURPIXELCNT);
mutex_unlock(&ed->mutex);
*x = get_xval(val);
*y = get_yval(val);
}
EXPORT_SYMBOL_GPL(extdst_pixel_cnt);
void extdst_last_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, LASTPIXELCNT);
mutex_unlock(&ed->mutex);
*x = get_xval(val);
*y = get_yval(val);
}
EXPORT_SYMBOL_GPL(extdst_last_pixel_cnt);
u32 extdst_perfresult(struct dpu_extdst *ed)
{
u32 val;
mutex_lock(&ed->mutex);
val = dpu_ed_read(ed, PERFCOUNTER);
mutex_unlock(&ed->mutex);
return val;
}
EXPORT_SYMBOL_GPL(extdst_perfresult);
bool extdst_is_master(struct dpu_extdst *ed)
{
const struct dpu_data *data = ed->dpu->data;
return ed->id == data->master_stream_id;
}
EXPORT_SYMBOL_GPL(extdst_is_master);
struct dpu_extdst *dpu_ed_get(struct dpu_soc *dpu, int id)
{
struct dpu_extdst *ed;
int i;
for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
if (ed_ids[i] == id)
break;
if (i == ARRAY_SIZE(ed_ids))
return ERR_PTR(-EINVAL);
ed = dpu->ed_priv[i];
mutex_lock(&ed->mutex);
if (ed->inuse) {
mutex_unlock(&ed->mutex);
return ERR_PTR(-EBUSY);
}
ed->inuse = true;
mutex_unlock(&ed->mutex);
return ed;
}
EXPORT_SYMBOL_GPL(dpu_ed_get);
void dpu_ed_put(struct dpu_extdst *ed)
{
mutex_lock(&ed->mutex);
ed->inuse = false;
mutex_unlock(&ed->mutex);
}
EXPORT_SYMBOL_GPL(dpu_ed_put);
struct dpu_extdst *dpu_aux_ed_peek(struct dpu_extdst *ed)
{
unsigned int aux_id = ed->id ^ 1;
int i;
for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
if (ed_ids[i] == aux_id)
return ed->dpu->ed_priv[i];
return NULL;
}
EXPORT_SYMBOL_GPL(dpu_aux_ed_peek);
void _dpu_ed_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_extdst *ed;
int i;
for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
if (ed_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(ed_ids)))
return;
ed = dpu->ed_priv[i];
extdst_pixengcfg_src_sel(ed, ED_SRC_DISABLE);
extdst_pixengcfg_shden(ed, true);
extdst_pixengcfg_powerdown(ed, false);
extdst_pixengcfg_sync_mode(ed, SINGLE);
extdst_pixengcfg_reset(ed, false);
extdst_pixengcfg_div(ed, DIV_RESET);
extdst_shden(ed, true);
extdst_perfcountmode(ed, false);
extdst_kick_mode(ed, EXTERNAL);
}
int dpu_ed_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_extdst *ed;
int ret, i;
ed = devm_kzalloc(dpu->dev, sizeof(*ed), GFP_KERNEL);
if (!ed)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(ed_ids); i++)
if (ed_ids[i] == id)
break;
if (i == ARRAY_SIZE(ed_ids))
return -EINVAL;
dpu->ed_priv[i] = ed;
ed->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_32);
if (!ed->pec_base)
return -ENOMEM;
ed->base = devm_ioremap(dpu->dev, base, SZ_64);
if (!ed->base)
return -ENOMEM;
ed->dpu = dpu;
ed->id = id;
mutex_init(&ed->mutex);
ret = extdst_pixengcfg_src_sel(ed, ED_SRC_DISABLE);
if (ret < 0)
return ret;
_dpu_ed_init(dpu, id);
return 0;
}

@ -0,0 +1,676 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drm_blend.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
static const u32 fd_vproc_cap[2] = {
DPU_VPROC_CAP_HSCALER4 | DPU_VPROC_CAP_VSCALER4 |
DPU_VPROC_CAP_FETCHECO0,
DPU_VPROC_CAP_HSCALER5 | DPU_VPROC_CAP_VSCALER5 |
DPU_VPROC_CAP_FETCHECO1,
};
#define PIXENGCFG_DYNAMIC 0x8
static const fd_dynamic_src_sel_t fd_srcs[2][4] = {
{
FD_SRC_DISABLE, FD_SRC_FETCHECO0,
FD_SRC_FETCHDECODE1, FD_SRC_FETCHWARP2
}, {
FD_SRC_DISABLE, FD_SRC_FETCHECO1,
FD_SRC_FETCHDECODE0, FD_SRC_FETCHWARP2
},
};
#define PIXENGCFG_STATUS 0xC
#define RINGBUFSTARTADDR0 0x10
#define RINGBUFWRAPADDR0 0x14
#define FRAMEPROPERTIES0 0x18
#define BASEADDRESS0 0x1C
#define SOURCEBUFFERATTRIBUTES0 0x20
#define SOURCEBUFFERDIMENSION0 0x24
#define COLORCOMPONENTBITS0 0x28
#define COLORCOMPONENTSHIFT0 0x2C
#define LAYEROFFSET0 0x30
#define CLIPWINDOWOFFSET0 0x34
#define CLIPWINDOWDIMENSIONS0 0x38
#define CONSTANTCOLOR0 0x3C
#define LAYERPROPERTY0 0x40
#define FRAMEDIMENSIONS 0x44
#define FRAMERESAMPLING 0x48
#define DECODECONTROL 0x4C
#define SOURCEBUFFERLENGTH 0x50
#define CONTROL 0x54
#define CONTROLTRIGGER 0x58
#define START 0x5C
#define FETCHTYPE 0x60
#define DECODERSTATUS 0x64
#define READADDRESS0 0x68
#define BURSTBUFFERPROPERTIES 0x6C
#define STATUS 0x70
#define HIDDENSTATUS 0x74
struct dpu_fetchdecode {
struct dpu_fetchunit fu;
fetchtype_t fetchtype;
};
int fetchdecode_pixengcfg_dynamic_src_sel(struct dpu_fetchunit *fu,
fd_dynamic_src_sel_t src)
{
int i;
mutex_lock(&fu->mutex);
for (i = 0; i < 4; i++) {
if (fd_srcs[fu->id][i] == src) {
dpu_pec_fu_write(fu, PIXENGCFG_DYNAMIC, src);
mutex_unlock(&fu->mutex);
return 0;
}
}
mutex_unlock(&fu->mutex);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(fetchdecode_pixengcfg_dynamic_src_sel);
static void
fetchdecode_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width,
unsigned int x_offset, unsigned int y_offset,
unsigned int mt_w, unsigned int mt_h,
int bpp, dma_addr_t baddr)
{
unsigned int burst_size, stride;
bool nonzero_mod = !!mt_w;
if (nonzero_mod) {
/* consider PRG x offset to calculate buffer address */
baddr += (x_offset % mt_w) * (bpp / 8);
burst_size = fetchunit_burst_size_fixup_tkt343664(baddr);
stride = width * (bpp / 8);
stride = fetchunit_stride_fixup_tkt339017(stride, burst_size,
baddr, nonzero_mod);
/* consider PRG y offset to calculate buffer address */
baddr += (y_offset % mt_h) * stride;
}
mutex_lock(&fu->mutex);
dpu_fu_write(fu, BASEADDRESS0, baddr);
mutex_unlock(&fu->mutex);
}
static void fetchdecode_set_src_bpp(struct dpu_fetchunit *fu, int bpp)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES0);
val &= ~0x3f0000;
val |= BITSPERPIXEL(bpp);
dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES0, val);
mutex_unlock(&fu->mutex);
}
static void
fetchdecode_set_src_stride(struct dpu_fetchunit *fu,
unsigned int width, unsigned int x_offset,
unsigned int mt_w, int bpp, unsigned int stride,
dma_addr_t baddr, bool use_prefetch)
{
unsigned int burst_size;
bool nonzero_mod = !!mt_w;
u32 val;
if (use_prefetch) {
/* consider PRG x offset to calculate buffer address */
if (nonzero_mod)
baddr += (x_offset % mt_w) * (bpp / 8);
burst_size = fetchunit_burst_size_fixup_tkt343664(baddr);
stride = width * (bpp / 8);
stride = fetchunit_stride_fixup_tkt339017(stride, burst_size,
baddr, nonzero_mod);
}
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES0);
val &= ~0xffff;
val |= STRIDE(stride);
dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES0, val);
mutex_unlock(&fu->mutex);
}
static void
fetchdecode_set_src_buf_dimensions(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h,
u32 unused, bool deinterlace)
{
u32 val;
if (deinterlace)
h /= 2;
val = LINEWIDTH(w) | LINECOUNT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, SOURCEBUFFERDIMENSION0, val);
mutex_unlock(&fu->mutex);
}
static void fetchdecode_set_fmt(struct dpu_fetchunit *fu,
u32 fmt,
enum drm_color_encoding color_encoding,
enum drm_color_range color_range,
bool deinterlace)
{
u32 val, bits, shift;
bool is_planar_yuv = false, is_rastermode_yuv422 = false;
bool is_yuv422upsamplingmode_interpolate = false;
bool is_inputselect_compact = false;
bool need_csc = false;
int i;
switch (fmt) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
is_rastermode_yuv422 = true;
is_yuv422upsamplingmode_interpolate = true;
need_csc = true;
break;
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
is_yuv422upsamplingmode_interpolate = true;
/* fall-through */
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
if (deinterlace)
is_yuv422upsamplingmode_interpolate = true;
is_planar_yuv = true;
is_rastermode_yuv422 = true;
is_inputselect_compact = true;
need_csc = true;
break;
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
is_planar_yuv = true;
is_yuv422upsamplingmode_interpolate = true;
is_inputselect_compact = true;
need_csc = true;
break;
default:
break;
}
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, CONTROL);
val &= ~YUV422UPSAMPLINGMODE_MASK;
val &= ~INPUTSELECT_MASK;
val &= ~RASTERMODE_MASK;
if (is_yuv422upsamplingmode_interpolate)
val |= YUV422UPSAMPLINGMODE(YUV422UPSAMPLINGMODE__INTERPOLATE);
else
val |= YUV422UPSAMPLINGMODE(YUV422UPSAMPLINGMODE__REPLICATE);
if (is_inputselect_compact)
val |= INPUTSELECT(INPUTSELECT__COMPPACK);
else
val |= INPUTSELECT(INPUTSELECT__INACTIVE);
if (is_rastermode_yuv422)
val |= RASTERMODE(RASTERMODE__YUV422);
else
val |= RASTERMODE(RASTERMODE__NORMAL);
dpu_fu_write(fu, CONTROL, val);
val = dpu_fu_read(fu, LAYERPROPERTY0);
val &= ~YUVCONVERSIONMODE_MASK;
if (need_csc) {
/* assuming fetchdecode always outputs RGB pixel formats */
if (color_encoding == DRM_COLOR_YCBCR_BT709)
val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__ITU709);
else if (color_encoding == DRM_COLOR_YCBCR_BT601 &&
color_range == DRM_COLOR_YCBCR_FULL_RANGE)
val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__ITU601_FR);
else
val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__ITU601);
} else {
val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF);
}
dpu_fu_write(fu, LAYERPROPERTY0, val);
mutex_unlock(&fu->mutex);
for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) {
if (dpu_pixel_format_matrix[i].pixel_format == fmt) {
bits = dpu_pixel_format_matrix[i].bits;
shift = dpu_pixel_format_matrix[i].shift;
if (is_planar_yuv) {
bits &= ~(U_BITS_MASK | V_BITS_MASK);
shift &= ~(U_SHIFT_MASK | V_SHIFT_MASK);
}
mutex_lock(&fu->mutex);
dpu_fu_write(fu, COLORCOMPONENTBITS0, bits);
dpu_fu_write(fu, COLORCOMPONENTSHIFT0, shift);
mutex_unlock(&fu->mutex);
return;
}
}
WARN_ON(1);
}
void fetchdecode_layeroffset(struct dpu_fetchunit *fu, unsigned int x,
unsigned int y)
{
u32 val;
val = LAYERXOFFSET(x) | LAYERYOFFSET(y);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, LAYEROFFSET0, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchdecode_layeroffset);
void fetchdecode_clipoffset(struct dpu_fetchunit *fu, unsigned int x,
unsigned int y)
{
u32 val;
val = CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CLIPWINDOWOFFSET0, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchdecode_clipoffset);
static void
fetchdecode_set_pixel_blend_mode(struct dpu_fetchunit *fu,
unsigned int pixel_blend_mode, u16 alpha,
u32 fb_format)
{
u32 mode = 0, val;
if (pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
mode = ALPHACONSTENABLE;
switch (fb_format) {
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
mode |= ALPHASRCENABLE;
break;
}
}
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY0);
val &= ~(PREMULCONSTRGB | ALPHA_ENABLE_MASK | RGB_ENABLE_MASK);
val |= mode;
dpu_fu_write(fu, LAYERPROPERTY0, val);
val = dpu_fu_read(fu, CONSTANTCOLOR0);
val &= ~CONSTANTALPHA_MASK;
val |= CONSTANTALPHA(alpha >> 8);
dpu_fu_write(fu, CONSTANTCOLOR0, val);
mutex_unlock(&fu->mutex);
}
static void fetchdecode_enable_src_buf(struct dpu_fetchunit *fu)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY0);
val |= SOURCEBUFFERENABLE;
dpu_fu_write(fu, LAYERPROPERTY0, val);
mutex_unlock(&fu->mutex);
}
static void fetchdecode_disable_src_buf(struct dpu_fetchunit *fu)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY0);
val &= ~SOURCEBUFFERENABLE;
dpu_fu_write(fu, LAYERPROPERTY0, val);
mutex_unlock(&fu->mutex);
}
static bool fetchdecode_is_enabled(struct dpu_fetchunit *fu)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY0);
mutex_unlock(&fu->mutex);
return !!(val & SOURCEBUFFERENABLE);
}
void fetchdecode_clipdimensions(struct dpu_fetchunit *fu, unsigned int w,
unsigned int h)
{
u32 val;
val = CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CLIPWINDOWDIMENSIONS0, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchdecode_clipdimensions);
static void
fetchdecode_set_framedimensions(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h,
bool deinterlace)
{
u32 val;
if (deinterlace)
h /= 2;
val = FRAMEWIDTH(w) | FRAMEHEIGHT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, FRAMEDIMENSIONS, val);
mutex_unlock(&fu->mutex);
}
void fetchdecode_rgb_constantcolor(struct dpu_fetchunit *fu,
u8 r, u8 g, u8 b, u8 a)
{
u32 val;
val = rgb_color(r, g, b, a);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONSTANTCOLOR0, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchdecode_rgb_constantcolor);
void fetchdecode_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v)
{
u32 val;
val = yuv_color(y, u, v);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONSTANTCOLOR0, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchdecode_yuv_constantcolor);
static void fetchdecode_set_controltrigger(struct dpu_fetchunit *fu)
{
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN);
mutex_unlock(&fu->mutex);
}
int fetchdecode_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type)
{
struct dpu_soc *dpu = fu->dpu;
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, FETCHTYPE);
val &= FETCHTYPE_MASK;
mutex_unlock(&fu->mutex);
switch (val) {
case FETCHTYPE__DECODE:
case FETCHTYPE__LAYER:
case FETCHTYPE__WARP:
case FETCHTYPE__ECO:
case FETCHTYPE__PERSP:
case FETCHTYPE__ROT:
case FETCHTYPE__DECODEL:
case FETCHTYPE__LAYERL:
case FETCHTYPE__ROTL:
break;
default:
dev_warn(dpu->dev, "Invalid fetch type %u for FetchDecode%d\n",
val, fu->id);
return -EINVAL;
}
*type = val;
return 0;
}
EXPORT_SYMBOL_GPL(fetchdecode_fetchtype);
u32 fetchdecode_get_vproc_mask(struct dpu_fetchunit *fu)
{
return fd_vproc_cap[fu->id];
}
EXPORT_SYMBOL_GPL(fetchdecode_get_vproc_mask);
struct dpu_fetchunit *fetchdecode_get_fetcheco(struct dpu_fetchunit *fu)
{
struct dpu_soc *dpu = fu->dpu;
switch (fu->id) {
case 0:
case 1:
return dpu->fe_priv[fu->id];
default:
WARN_ON(1);
}
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(fetchdecode_get_fetcheco);
bool fetchdecode_need_fetcheco(struct dpu_fetchunit *fu, u32 fmt)
{
struct dpu_fetchunit *fe = fetchdecode_get_fetcheco(fu);
if (IS_ERR_OR_NULL(fe))
return false;
switch (fmt) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(fetchdecode_need_fetcheco);
struct dpu_hscaler *fetchdecode_get_hscaler(struct dpu_fetchunit *fu)
{
struct dpu_soc *dpu = fu->dpu;
switch (fu->id) {
case 0:
case 2:
return dpu->hs_priv[0];
case 1:
case 3:
return dpu->hs_priv[1];
default:
WARN_ON(1);
}
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(fetchdecode_get_hscaler);
struct dpu_vscaler *fetchdecode_get_vscaler(struct dpu_fetchunit *fu)
{
struct dpu_soc *dpu = fu->dpu;
switch (fu->id) {
case 0:
case 2:
return dpu->vs_priv[0];
case 1:
case 3:
return dpu->vs_priv[1];
default:
WARN_ON(1);
}
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(fetchdecode_get_vscaler);
struct dpu_fetchunit *dpu_fd_get(struct dpu_soc *dpu, int id)
{
struct dpu_fetchunit *fu;
int i;
for (i = 0; i < ARRAY_SIZE(fd_ids); i++)
if (fd_ids[i] == id)
break;
if (i == ARRAY_SIZE(fd_ids))
return ERR_PTR(-EINVAL);
fu = dpu->fd_priv[i];
mutex_lock(&fu->mutex);
if (fu->inuse) {
mutex_unlock(&fu->mutex);
return ERR_PTR(-EBUSY);
}
fu->inuse = true;
mutex_unlock(&fu->mutex);
return fu;
}
EXPORT_SYMBOL_GPL(dpu_fd_get);
void dpu_fd_put(struct dpu_fetchunit *fu)
{
mutex_lock(&fu->mutex);
fu->inuse = false;
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(dpu_fd_put);
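/*
 * Typical acquire/use/release pattern (a sketch only; the surrounding
 * KMS code is the real caller and the argument values here are
 * illustrative):
 *
 *	struct dpu_fetchunit *fu = dpu_fd_get(dpu, 0);
 *
 *	if (!IS_ERR(fu)) {
 *		fu->ops->set_fmt(fu, DRM_FORMAT_ARGB8888,
 *				 DRM_COLOR_YCBCR_BT601,
 *				 DRM_COLOR_YCBCR_LIMITED_RANGE, false);
 *		fu->ops->enable_src_buf(fu);
 *		...
 *		dpu_fd_put(fu);
 *	}
 */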
static const struct dpu_fetchunit_ops fd_ops = {
.set_burstlength = fetchunit_set_burstlength,
.set_baseaddress = fetchdecode_set_baseaddress,
.set_src_bpp = fetchdecode_set_src_bpp,
.set_src_stride = fetchdecode_set_src_stride,
.set_src_buf_dimensions = fetchdecode_set_src_buf_dimensions,
.set_fmt = fetchdecode_set_fmt,
.set_pixel_blend_mode = fetchdecode_set_pixel_blend_mode,
.enable_src_buf = fetchdecode_enable_src_buf,
.disable_src_buf = fetchdecode_disable_src_buf,
.is_enabled = fetchdecode_is_enabled,
.set_framedimensions = fetchdecode_set_framedimensions,
.set_controltrigger = fetchdecode_set_controltrigger,
.get_stream_id = fetchunit_get_stream_id,
.set_stream_id = fetchunit_set_stream_id,
.pin_off = fetchunit_pin_off,
.unpin_off = fetchunit_unpin_off,
.is_pinned_off = fetchunit_is_pinned_off,
};
void _dpu_fd_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_fetchunit *fu;
int i;
for (i = 0; i < ARRAY_SIZE(fd_ids); i++)
if (fd_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(fd_ids)))
return;
fu = dpu->fd_priv[i];
fetchdecode_pixengcfg_dynamic_src_sel(fu, FD_SRC_DISABLE);
fetchunit_baddr_autoupdate(fu, 0x0);
fetchunit_shden(fu, true);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, BURSTBUFFERMANAGEMENT,
SETNUMBUFFERS(16) | SETBURSTLENGTH(16));
mutex_unlock(&fu->mutex);
}
int dpu_fd_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_fetchdecode *fd;
struct dpu_fetchunit *fu;
int ret;
fd = devm_kzalloc(dpu->dev, sizeof(*fd), GFP_KERNEL);
if (!fd)
return -ENOMEM;
fu = &fd->fu;
dpu->fd_priv[id] = fu;
fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
if (!fu->pec_base)
return -ENOMEM;
fu->base = devm_ioremap(dpu->dev, base, SZ_1K);
if (!fu->base)
return -ENOMEM;
fu->dpu = dpu;
fu->id = id;
fu->type = FU_T_FD;
fu->ops = &fd_ops;
fu->name = "fetchdecode";
mutex_init(&fu->mutex);
ret = fetchdecode_pixengcfg_dynamic_src_sel(fu, FD_SRC_DISABLE);
if (ret < 0)
return ret;
ret = fetchdecode_fetchtype(fu, &fd->fetchtype);
if (ret < 0)
return ret;
_dpu_fd_init(dpu, id);
return 0;
}


@ -0,0 +1,410 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define BASEADDRESS0 0x10
#define SOURCEBUFFERATTRIBUTES0 0x14
#define SOURCEBUFFERDIMENSION0 0x18
#define COLORCOMPONENTBITS0 0x1C
#define COLORCOMPONENTSHIFT0 0x20
#define LAYEROFFSET0 0x24
#define CLIPWINDOWOFFSET0 0x28
#define CLIPWINDOWDIMENSIONS0 0x2C
#define CONSTANTCOLOR0 0x30
#define LAYERPROPERTY0 0x34
#define FRAMEDIMENSIONS 0x38
#define FRAMERESAMPLING 0x3C
#define CONTROL 0x40
#define CONTROLTRIGGER 0x44
#define START 0x48
#define FETCHTYPE 0x4C
#define BURSTBUFFERPROPERTIES 0x50
#define HIDDENSTATUS 0x54
struct dpu_fetcheco {
struct dpu_fetchunit fu;
};
static void
fetcheco_set_src_buf_dimensions(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h,
u32 fmt, bool deinterlace)
{
int width, height;
u32 val;
if (deinterlace) {
width = w;
height = h / 2;
} else {
width = dpu_format_plane_width(w, fmt, 1);
height = dpu_format_plane_height(h, fmt, 1);
}
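/*
 * e.g. for NV12 at 1920x1080 the chroma buffer is presumably 960x540
 * samples (one sample per interleaved CbCr pair); in the deinterlaced
 * case the full width is kept and only the height is halved per field.
 */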
switch (fmt) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
break;
default:
WARN(1, "Unsupported FetchEco pixel format 0x%08x\n", fmt);
return;
}
val = LINEWIDTH(width) | LINECOUNT(height);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, SOURCEBUFFERDIMENSION0, val);
mutex_unlock(&fu->mutex);
}
static void fetcheco_set_fmt(struct dpu_fetchunit *fu,
u32 fmt,
enum drm_color_encoding unused1,
enum drm_color_range unused2,
bool unused3)
{
u32 val, bits, shift;
int i, hsub, vsub;
unsigned int x, y;
switch (fmt) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
break;
default:
WARN(1, "Unsupported FetchEco pixel format 0x%08x\n", fmt);
return;
}
hsub = dpu_format_horz_chroma_subsampling(fmt);
switch (hsub) {
case 1:
x = 0x4;
break;
case 2:
x = 0x2;
break;
default:
WARN_ON(1);
return;
}
vsub = dpu_format_vert_chroma_subsampling(fmt);
switch (vsub) {
case 1:
y = 0x4;
break;
case 2:
y = 0x2;
break;
default:
WARN_ON(1);
return;
}
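/*
 * DELTAX/DELTAY appear to be the chroma resampling step in 1/4 sample
 * units: 0x4 (step 1.0) when that axis is not subsampled, 0x2 (step 0.5)
 * for 2:1 subsampling, i.e. the chroma read position advances half a
 * sample per output pixel.
 */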
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, FRAMERESAMPLING);
val &= ~(DELTAX_MASK | DELTAY_MASK);
val |= DELTAX(x) | DELTAY(y);
dpu_fu_write(fu, FRAMERESAMPLING, val);
val = dpu_fu_read(fu, CONTROL);
val &= ~RASTERMODE_MASK;
val |= RASTERMODE(RASTERMODE__NORMAL);
dpu_fu_write(fu, CONTROL, val);
mutex_unlock(&fu->mutex);
for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) {
if (dpu_pixel_format_matrix[i].pixel_format == fmt) {
bits = dpu_pixel_format_matrix[i].bits;
shift = dpu_pixel_format_matrix[i].shift;
bits &= ~Y_BITS_MASK;
shift &= ~Y_SHIFT_MASK;
mutex_lock(&fu->mutex);
dpu_fu_write(fu, COLORCOMPONENTBITS0, bits);
dpu_fu_write(fu, COLORCOMPONENTSHIFT0, shift);
mutex_unlock(&fu->mutex);
return;
}
}
WARN_ON(1);
}
void fetcheco_layeroffset(struct dpu_fetchunit *fu, unsigned int x,
unsigned int y)
{
u32 val;
val = LAYERXOFFSET(x) | LAYERYOFFSET(y);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, LAYEROFFSET0, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetcheco_layeroffset);
void fetcheco_clipoffset(struct dpu_fetchunit *fu, unsigned int x,
unsigned int y)
{
u32 val;
val = CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CLIPWINDOWOFFSET0, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetcheco_clipoffset);
void fetcheco_clipdimensions(struct dpu_fetchunit *fu, unsigned int w,
unsigned int h)
{
u32 val;
val = CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CLIPWINDOWDIMENSIONS0, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetcheco_clipdimensions);
static void
fetcheco_set_framedimensions(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h,
bool deinterlace)
{
u32 val;
if (deinterlace)
h /= 2;
val = FRAMEWIDTH(w) | FRAMEHEIGHT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, FRAMEDIMENSIONS, val);
mutex_unlock(&fu->mutex);
}
void fetcheco_frameresampling(struct dpu_fetchunit *fu, unsigned int x,
unsigned int y)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, FRAMERESAMPLING);
val &= ~(DELTAX_MASK | DELTAY_MASK);
val |= DELTAX(x) | DELTAY(y);
dpu_fu_write(fu, FRAMERESAMPLING, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetcheco_frameresampling);
static void fetcheco_set_controltrigger(struct dpu_fetchunit *fu)
{
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN);
mutex_unlock(&fu->mutex);
}
int fetcheco_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type)
{
struct dpu_soc *dpu = fu->dpu;
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, FETCHTYPE);
val &= FETCHTYPE_MASK;
mutex_unlock(&fu->mutex);
switch (val) {
case FETCHTYPE__DECODE:
case FETCHTYPE__LAYER:
case FETCHTYPE__WARP:
case FETCHTYPE__ECO:
case FETCHTYPE__PERSP:
case FETCHTYPE__ROT:
case FETCHTYPE__DECODEL:
case FETCHTYPE__LAYERL:
case FETCHTYPE__ROTL:
break;
default:
dev_warn(dpu->dev, "Invalid fetch type %u for FetchEco%d\n",
val, fu->id);
return -EINVAL;
}
*type = val;
return 0;
}
EXPORT_SYMBOL_GPL(fetcheco_fetchtype);
dpu_block_id_t fetcheco_get_block_id(struct dpu_fetchunit *fu)
{
switch (fu->id) {
case 0:
return ID_FETCHECO0;
case 1:
return ID_FETCHECO1;
case 2:
return ID_FETCHECO2;
case 9:
return ID_FETCHECO9;
default:
WARN_ON(1);
}
return ID_NONE;
}
EXPORT_SYMBOL_GPL(fetcheco_get_block_id);
struct dpu_fetchunit *dpu_fe_get(struct dpu_soc *dpu, int id)
{
struct dpu_fetchunit *fu;
int i;
for (i = 0; i < ARRAY_SIZE(fe_ids); i++)
if (fe_ids[i] == id)
break;
if (i == ARRAY_SIZE(fe_ids))
return ERR_PTR(-EINVAL);
fu = dpu->fe_priv[i];
mutex_lock(&fu->mutex);
if (fu->inuse) {
mutex_unlock(&fu->mutex);
return ERR_PTR(-EBUSY);
}
fu->inuse = true;
mutex_unlock(&fu->mutex);
return fu;
}
EXPORT_SYMBOL_GPL(dpu_fe_get);
void dpu_fe_put(struct dpu_fetchunit *fu)
{
mutex_lock(&fu->mutex);
fu->inuse = false;
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(dpu_fe_put);
static const struct dpu_fetchunit_ops fe_ops = {
.set_burstlength = fetchunit_set_burstlength,
.set_baseaddress = fetchunit_set_baseaddress,
.set_src_bpp = fetchunit_set_src_bpp,
.set_src_stride = fetchunit_set_src_stride,
.set_src_buf_dimensions = fetcheco_set_src_buf_dimensions,
.set_fmt = fetcheco_set_fmt,
.enable_src_buf = fetchunit_enable_src_buf,
.disable_src_buf = fetchunit_disable_src_buf,
.is_enabled = fetchunit_is_enabled,
.set_framedimensions = fetcheco_set_framedimensions,
.set_controltrigger = fetcheco_set_controltrigger,
.get_stream_id = fetchunit_get_stream_id,
.set_stream_id = fetchunit_set_stream_id,
.pin_off = fetchunit_pin_off,
.unpin_off = fetchunit_unpin_off,
.is_pinned_off = fetchunit_is_pinned_off,
};
void _dpu_fe_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_fetchunit *fu;
int i;
for (i = 0; i < ARRAY_SIZE(fe_ids); i++)
if (fe_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(fe_ids)))
return;
fu = dpu->fe_priv[i];
fetchunit_shden(fu, true);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, BURSTBUFFERMANAGEMENT,
SETNUMBUFFERS(16) | SETBURSTLENGTH(16));
mutex_unlock(&fu->mutex);
}
int dpu_fe_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_fetcheco *fe;
struct dpu_fetchunit *fu;
int i;
fe = devm_kzalloc(dpu->dev, sizeof(*fe), GFP_KERNEL);
if (!fe)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(fe_ids); i++)
if (fe_ids[i] == id)
break;
if (i == ARRAY_SIZE(fe_ids))
return -EINVAL;
fu = &fe->fu;
dpu->fe_priv[i] = fu;
fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
if (!fu->pec_base)
return -ENOMEM;
fu->base = devm_ioremap(dpu->dev, base, SZ_128);
if (!fu->base)
return -ENOMEM;
fu->dpu = dpu;
fu->id = id;
fu->type = FU_T_FE;
fu->ops = &fe_ops;
fu->name = "fetcheco";
mutex_init(&fu->mutex);
_dpu_fe_init(dpu, id);
return 0;
}


@ -0,0 +1,297 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define PIXENGCFG_STATUS 0x8
#define BASEADDRESS(n) (0x10 + (n) * 0x28)
#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28)
#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28)
#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28)
#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28)
#define LAYEROFFSET(n) (0x24 + (n) * 0x28)
#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28)
#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28)
#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28)
#define LAYERPROPERTY(n) (0x34 + (n) * 0x28)
#define FRAMEDIMENSIONS 0x150
#define FRAMERESAMPLING 0x154
#define CONTROL 0x158
#define TRIGGERENABLE 0x15C
#define SHDLDREQ(lm) ((lm) & 0xFF)
#define CONTROLTRIGGER 0x160
#define START 0x164
#define FETCHTYPE 0x168
#define BURSTBUFFERPROPERTIES 0x16C
#define STATUS 0x170
#define HIDDENSTATUS 0x174
struct dpu_fetchlayer {
struct dpu_fetchunit fu;
fetchtype_t fetchtype;
};
static void
fetchlayer_set_src_buf_dimensions(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h,
u32 unused1, bool unused2)
{
u32 val;
val = LINEWIDTH(w) | LINECOUNT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, SOURCEBUFFERDIMENSION(fu->sub_id), val);
mutex_unlock(&fu->mutex);
}
static void fetchlayer_set_fmt(struct dpu_fetchunit *fu,
u32 fmt,
enum drm_color_encoding color_encoding,
enum drm_color_range color_range,
bool unused)
{
u32 val, bits, shift;
int i, sub_id = fu->sub_id;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY(sub_id));
val &= ~YUVCONVERSIONMODE_MASK;
val |= YUVCONVERSIONMODE(YUVCONVERSIONMODE__OFF);
dpu_fu_write(fu, LAYERPROPERTY(sub_id), val);
mutex_unlock(&fu->mutex);
for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) {
if (dpu_pixel_format_matrix[i].pixel_format == fmt) {
bits = dpu_pixel_format_matrix[i].bits;
shift = dpu_pixel_format_matrix[i].shift;
mutex_lock(&fu->mutex);
dpu_fu_write(fu, COLORCOMPONENTBITS(sub_id), bits);
dpu_fu_write(fu, COLORCOMPONENTSHIFT(sub_id), shift);
mutex_unlock(&fu->mutex);
return;
}
}
WARN_ON(1);
}
static void
fetchlayer_set_framedimensions(struct dpu_fetchunit *fu, unsigned int w,
unsigned int h, bool unused)
{
u32 val;
val = FRAMEWIDTH(w) | FRAMEHEIGHT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, FRAMEDIMENSIONS, val);
mutex_unlock(&fu->mutex);
}
void fetchlayer_rgb_constantcolor(struct dpu_fetchunit *fu,
u8 r, u8 g, u8 b, u8 a)
{
u32 val;
val = rgb_color(r, g, b, a);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONSTANTCOLOR(fu->id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchlayer_rgb_constantcolor);
void fetchlayer_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v)
{
u32 val;
val = yuv_color(y, u, v);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONSTANTCOLOR(fu->id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchlayer_yuv_constantcolor);
static void fetchlayer_set_controltrigger(struct dpu_fetchunit *fu)
{
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN);
mutex_unlock(&fu->mutex);
}
int fetchlayer_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type)
{
struct dpu_soc *dpu = fu->dpu;
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, FETCHTYPE);
val &= FETCHTYPE_MASK;
mutex_unlock(&fu->mutex);
switch (val) {
case FETCHTYPE__DECODE:
case FETCHTYPE__LAYER:
case FETCHTYPE__WARP:
case FETCHTYPE__ECO:
case FETCHTYPE__PERSP:
case FETCHTYPE__ROT:
case FETCHTYPE__DECODEL:
case FETCHTYPE__LAYERL:
case FETCHTYPE__ROTL:
break;
default:
dev_warn(dpu->dev, "Invalid fetch type %u for FetchLayer%d\n",
val, fu->id);
return -EINVAL;
}
*type = val;
return 0;
}
EXPORT_SYMBOL_GPL(fetchlayer_fetchtype);
struct dpu_fetchunit *dpu_fl_get(struct dpu_soc *dpu, int id)
{
struct dpu_fetchunit *fu;
int i;
for (i = 0; i < ARRAY_SIZE(fl_ids); i++)
if (fl_ids[i] == id)
break;
if (i == ARRAY_SIZE(fl_ids))
return ERR_PTR(-EINVAL);
fu = dpu->fl_priv[i];
mutex_lock(&fu->mutex);
if (fu->inuse) {
mutex_unlock(&fu->mutex);
return ERR_PTR(-EBUSY);
}
fu->inuse = true;
mutex_unlock(&fu->mutex);
return fu;
}
EXPORT_SYMBOL_GPL(dpu_fl_get);
void dpu_fl_put(struct dpu_fetchunit *fu)
{
mutex_lock(&fu->mutex);
fu->inuse = false;
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(dpu_fl_put);
static const struct dpu_fetchunit_ops fl_ops = {
.set_burstlength = fetchunit_set_burstlength,
.set_baseaddress = fetchunit_set_baseaddress,
.set_src_bpp = fetchunit_set_src_bpp,
.set_src_stride = fetchunit_set_src_stride,
.set_src_buf_dimensions = fetchlayer_set_src_buf_dimensions,
.set_fmt = fetchlayer_set_fmt,
.set_pixel_blend_mode = fetchunit_set_pixel_blend_mode,
.enable_src_buf = fetchunit_enable_src_buf,
.disable_src_buf = fetchunit_disable_src_buf,
.is_enabled = fetchunit_is_enabled,
.set_framedimensions = fetchlayer_set_framedimensions,
.set_controltrigger = fetchlayer_set_controltrigger,
.get_stream_id = fetchunit_get_stream_id,
.set_stream_id = fetchunit_set_stream_id,
.pin_off = fetchunit_pin_off,
.unpin_off = fetchunit_unpin_off,
.is_pinned_off = fetchunit_is_pinned_off,
};
void _dpu_fl_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_fetchunit *fu;
int i;
for (i = 0; i < ARRAY_SIZE(fl_ids); i++)
if (fl_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(fl_ids)))
return;
fu = dpu->fl_priv[i];
fetchunit_baddr_autoupdate(fu, 0x0);
fetchunit_shden(fu, true);
fetchunit_shdldreq_sticky(fu, 0xFF);
fetchunit_disable_src_buf(fu);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, BURSTBUFFERMANAGEMENT,
SETNUMBUFFERS(16) | SETBURSTLENGTH(16));
mutex_unlock(&fu->mutex);
}
int dpu_fl_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_fetchlayer *fl;
struct dpu_fetchunit *fu;
int ret;
fl = devm_kzalloc(dpu->dev, sizeof(*fl), GFP_KERNEL);
if (!fl)
return -ENOMEM;
fu = &fl->fu;
dpu->fl_priv[id] = fu;
fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
if (!fu->pec_base)
return -ENOMEM;
fu->base = devm_ioremap(dpu->dev, base, SZ_512);
if (!fu->base)
return -ENOMEM;
fu->dpu = dpu;
fu->id = id;
fu->sub_id = 0;
fu->type = FU_T_FL;
fu->ops = &fl_ops;
fu->name = "fetchlayer";
mutex_init(&fu->mutex);
ret = fetchlayer_fetchtype(fu, &fl->fetchtype);
if (ret < 0)
return ret;
_dpu_fl_init(dpu, id);
return 0;
}


@ -0,0 +1,373 @@
/*
* Copyright 2018-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drm_blend.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define BASEADDRESS(n) (0x10 + (n) * 0x28)
#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28)
#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28)
#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28)
#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28)
#define LAYEROFFSET(n) (0x24 + (n) * 0x28)
#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28)
#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28)
#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28)
#define LAYERPROPERTY(n) (0x34 + (n) * 0x28)
/* base address has to align to burst size */
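/*
 * Worked example: baddr = 0x80001860 has 0x20 as its lowest set bit, so
 * the burst size is 32 bytes; baddr = 0x80001804 gives 4, rounded up to
 * the 8-byte minimum; any 256-byte aligned baddr is clamped to the
 * 128-byte maximum.
 */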
unsigned int fetchunit_burst_size_fixup_tkt343664(dma_addr_t baddr)
{
unsigned int burst_size;
burst_size = 1 << (ffs(baddr) - 1);
burst_size = round_up(burst_size, 8);
burst_size = min(burst_size, 128U);
return burst_size;
}
EXPORT_SYMBOL_GPL(fetchunit_burst_size_fixup_tkt343664);
/* fixup for burst size vs stride mismatch */
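/*
 * Worked example: stride = 1000 bytes, burst_size = 64 and baddr % 8 = 4
 * with nonzero_mod set gives round_up(1000 + 8, 64) = 1024 bytes.
 */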
unsigned int
fetchunit_stride_fixup_tkt339017(unsigned int stride, unsigned int burst_size,
dma_addr_t baddr, bool nonzero_mod)
{
if (nonzero_mod)
stride = round_up(stride + round_up(baddr % 8, 8), burst_size);
else
stride = round_up(stride, burst_size);
return stride;
}
EXPORT_SYMBOL_GPL(fetchunit_stride_fixup_tkt339017);
void fetchunit_get_dprc(struct dpu_fetchunit *fu, void *data)
{
if (WARN_ON(!fu))
return;
fu->dprc = data;
}
EXPORT_SYMBOL_GPL(fetchunit_get_dprc);
void fetchunit_shden(struct dpu_fetchunit *fu, bool enable)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, STATICCONTROL);
if (enable)
val |= SHDEN;
else
val &= ~SHDEN;
dpu_fu_write(fu, STATICCONTROL, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_shden);
void fetchunit_baddr_autoupdate(struct dpu_fetchunit *fu, u8 layer_mask)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, STATICCONTROL);
val &= ~BASEADDRESSAUTOUPDATE_MASK;
val |= BASEADDRESSAUTOUPDATE(layer_mask);
dpu_fu_write(fu, STATICCONTROL, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_baddr_autoupdate);
void fetchunit_shdldreq_sticky(struct dpu_fetchunit *fu, u8 layer_mask)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, STATICCONTROL);
val &= ~SHDLDREQSTICKY_MASK;
val |= SHDLDREQSTICKY(layer_mask);
dpu_fu_write(fu, STATICCONTROL, val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_shdldreq_sticky);
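/*
 * The SETBURSTLENGTH field is presumably counted in 8-byte beats:
 * burst_size is in bytes, hence burst_size / 8, and the non-prefetch
 * default of 16 corresponds to 128-byte bursts.
 */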
void fetchunit_set_burstlength(struct dpu_fetchunit *fu,
unsigned int x_offset, unsigned int mt_w,
int bpp, dma_addr_t baddr, bool use_prefetch)
{
struct dpu_soc *dpu = fu->dpu;
unsigned int burst_size, burst_length;
bool nonzero_mod = !!mt_w;
u32 val;
if (use_prefetch) {
/* consider PRG x offset to calculate buffer address */
if (nonzero_mod)
baddr += (x_offset % mt_w) * (bpp / 8);
burst_size = fetchunit_burst_size_fixup_tkt343664(baddr);
burst_length = burst_size / 8;
} else {
burst_length = 16;
}
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, BURSTBUFFERMANAGEMENT);
val &= ~SETBURSTLENGTH_MASK;
val |= SETBURSTLENGTH(burst_length);
dpu_fu_write(fu, BURSTBUFFERMANAGEMENT, val);
mutex_unlock(&fu->mutex);
dev_dbg(dpu->dev, "%s%d burst length is %u\n",
fu->name, fu->id, burst_length);
}
EXPORT_SYMBOL_GPL(fetchunit_set_burstlength);
void fetchunit_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width,
unsigned int x_offset, unsigned int y_offset,
unsigned int mt_w, unsigned int mt_h,
int bpp, dma_addr_t baddr)
{
unsigned int burst_size, stride;
bool nonzero_mod = !!mt_w;
if (nonzero_mod) {
/* consider PRG x offset to calculate buffer address */
baddr += (x_offset % mt_w) * (bpp / 8);
burst_size = fetchunit_burst_size_fixup_tkt343664(baddr);
stride = width * (bpp / 8);
stride = fetchunit_stride_fixup_tkt339017(stride, burst_size,
baddr, nonzero_mod);
/* consider PRG y offset to calculate buffer address */
baddr += (y_offset % mt_h) * stride;
}
mutex_lock(&fu->mutex);
dpu_fu_write(fu, BASEADDRESS(fu->sub_id), baddr);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_set_baseaddress);
void fetchunit_set_src_bpp(struct dpu_fetchunit *fu, int bpp)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id));
val &= ~0x3f0000;
val |= BITSPERPIXEL(bpp);
dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_set_src_bpp);
/*
 * The arguments width and bpp are valid only when use_prefetch is true.
 * For fetcheco, since the pixel format has to be NV12 or NV21 when
 * use_prefetch is true, width is taken as the number of UV bytes per
 * line and bpp as 8 bits for every U or V component.
 */
void fetchunit_set_src_stride(struct dpu_fetchunit *fu,
unsigned int width, unsigned int x_offset,
unsigned int mt_w, int bpp, unsigned int stride,
dma_addr_t baddr, bool use_prefetch)
{
unsigned int burst_size;
bool nonzero_mod = !!mt_w;
u32 val;
if (use_prefetch) {
/* consider PRG x offset to calculate buffer address */
if (nonzero_mod)
baddr += (x_offset % mt_w) * (bpp / 8);
burst_size = fetchunit_burst_size_fixup_tkt343664(baddr);
stride = width * (bpp / 8);
stride = fetchunit_stride_fixup_tkt339017(stride, burst_size,
baddr, nonzero_mod);
}
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id));
val &= ~0xffff;
val |= STRIDE(stride);
dpu_fu_write(fu, SOURCEBUFFERATTRIBUTES(fu->sub_id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_set_src_stride);
void fetchunit_set_pixel_blend_mode(struct dpu_fetchunit *fu,
unsigned int pixel_blend_mode, u16 alpha,
u32 fb_format)
{
u32 mode = 0, val;
if (pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
mode = ALPHACONSTENABLE;
switch (fb_format) {
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
mode |= ALPHASRCENABLE;
break;
}
}
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id));
val &= ~(PREMULCONSTRGB | ALPHA_ENABLE_MASK | RGB_ENABLE_MASK);
val |= mode;
dpu_fu_write(fu, LAYERPROPERTY(fu->sub_id), val);
val = dpu_fu_read(fu, CONSTANTCOLOR(fu->sub_id));
val &= ~CONSTANTALPHA_MASK;
val |= CONSTANTALPHA(alpha >> 8);
dpu_fu_write(fu, CONSTANTCOLOR(fu->sub_id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_set_pixel_blend_mode);
void fetchunit_enable_src_buf(struct dpu_fetchunit *fu)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id));
val |= SOURCEBUFFERENABLE;
dpu_fu_write(fu, LAYERPROPERTY(fu->sub_id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_enable_src_buf);
void fetchunit_disable_src_buf(struct dpu_fetchunit *fu)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id));
val &= ~SOURCEBUFFERENABLE;
dpu_fu_write(fu, LAYERPROPERTY(fu->sub_id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchunit_disable_src_buf);
bool fetchunit_is_enabled(struct dpu_fetchunit *fu)
{
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY(fu->sub_id));
mutex_unlock(&fu->mutex);
return !!(val & SOURCEBUFFERENABLE);
}
EXPORT_SYMBOL_GPL(fetchunit_is_enabled);
unsigned int fetchunit_get_stream_id(struct dpu_fetchunit *fu)
{
if (WARN_ON(!fu))
return DPU_PLANE_SRC_DISABLED;
return fu->stream_id;
}
EXPORT_SYMBOL_GPL(fetchunit_get_stream_id);
void fetchunit_set_stream_id(struct dpu_fetchunit *fu, unsigned int id)
{
if (WARN_ON(!fu))
return;
switch (id) {
case DPU_PLANE_SRC_TO_DISP_STREAM0:
case DPU_PLANE_SRC_TO_DISP_STREAM1:
case DPU_PLANE_SRC_DISABLED:
fu->stream_id = id;
break;
default:
WARN_ON(1);
}
}
EXPORT_SYMBOL_GPL(fetchunit_set_stream_id);
void fetchunit_pin_off(struct dpu_fetchunit *fu)
{
if (WARN_ON(!fu))
return;
fu->pin_off = true;
}
EXPORT_SYMBOL_GPL(fetchunit_pin_off);
void fetchunit_unpin_off(struct dpu_fetchunit *fu)
{
if (WARN_ON(!fu))
return;
fu->pin_off = false;
}
EXPORT_SYMBOL_GPL(fetchunit_unpin_off);
bool fetchunit_is_pinned_off(struct dpu_fetchunit *fu)
{
if (WARN_ON(!fu))
return false;
return fu->pin_off;
}
EXPORT_SYMBOL_GPL(fetchunit_is_pinned_off);
bool fetchunit_is_fetchdecode(struct dpu_fetchunit *fu)
{
if (WARN_ON(!fu))
return false;
return fu->type == FU_T_FD;
}
EXPORT_SYMBOL_GPL(fetchunit_is_fetchdecode);
bool fetchunit_is_fetcheco(struct dpu_fetchunit *fu)
{
if (WARN_ON(!fu))
return false;
return fu->type == FU_T_FE;
}
EXPORT_SYMBOL_GPL(fetchunit_is_fetcheco);
bool fetchunit_is_fetchlayer(struct dpu_fetchunit *fu)
{
if (WARN_ON(!fu))
return false;
return fu->type == FU_T_FL;
}
EXPORT_SYMBOL_GPL(fetchunit_is_fetchlayer);
bool fetchunit_is_fetchwarp(struct dpu_fetchunit *fu)
{
if (WARN_ON(!fu))
return false;
return fu->type == FU_T_FW;
}
EXPORT_SYMBOL_GPL(fetchunit_is_fetchwarp);


@ -0,0 +1,308 @@
/*
* Copyright 2018-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define PIXENGCFG_STATUS 0x8
#define BASEADDRESS(n) (0x10 + (n) * 0x28)
#define SOURCEBUFFERATTRIBUTES(n) (0x14 + (n) * 0x28)
#define SOURCEBUFFERDIMENSION(n) (0x18 + (n) * 0x28)
#define COLORCOMPONENTBITS(n) (0x1C + (n) * 0x28)
#define COLORCOMPONENTSHIFT(n) (0x20 + (n) * 0x28)
#define LAYEROFFSET(n) (0x24 + (n) * 0x28)
#define CLIPWINDOWOFFSET(n) (0x28 + (n) * 0x28)
#define CLIPWINDOWDIMENSIONS(n) (0x2C + (n) * 0x28)
#define CONSTANTCOLOR(n) (0x30 + (n) * 0x28)
#define LAYERPROPERTY(n) (0x34 + (n) * 0x28)
#define FRAMEDIMENSIONS 0x150
#define FRAMERESAMPLING 0x154
#define WARPCONTROL 0x158
#define ARBSTARTX 0x15c
#define ARBSTARTY 0x160
#define ARBDELTA 0x164
#define FIRPOSITIONS 0x168
#define FIRCOEFFICIENTS 0x16c
#define CONTROL 0x170
#define TRIGGERENABLE 0x174
#define SHDLDREQ(lm) ((lm) & 0xFF)
#define CONTROLTRIGGER 0x178
#define START 0x17c
#define FETCHTYPE 0x180
#define BURSTBUFFERPROPERTIES 0x184
#define STATUS 0x188
#define HIDDENSTATUS 0x18c
struct dpu_fetchwarp {
struct dpu_fetchunit fu;
fetchtype_t fetchtype;
};
static void
fetchwarp_set_src_buf_dimensions(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h,
u32 unused1, bool unused2)
{
u32 val;
val = LINEWIDTH(w) | LINECOUNT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, SOURCEBUFFERDIMENSION(fu->sub_id), val);
mutex_unlock(&fu->mutex);
}
static void fetchwarp_set_fmt(struct dpu_fetchunit *fu,
u32 fmt,
enum drm_color_encoding color_encoding,
enum drm_color_range color_range,
bool unused)
{
u32 val, bits, shift;
int i, sub_id = fu->sub_id;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, LAYERPROPERTY(sub_id));
val &= ~YUVCONVERSIONMODE_MASK;
dpu_fu_write(fu, LAYERPROPERTY(sub_id), val);
mutex_unlock(&fu->mutex);
for (i = 0; i < ARRAY_SIZE(dpu_pixel_format_matrix); i++) {
if (dpu_pixel_format_matrix[i].pixel_format == fmt) {
bits = dpu_pixel_format_matrix[i].bits;
shift = dpu_pixel_format_matrix[i].shift;
mutex_lock(&fu->mutex);
dpu_fu_write(fu, COLORCOMPONENTBITS(sub_id), bits);
dpu_fu_write(fu, COLORCOMPONENTSHIFT(sub_id), shift);
mutex_unlock(&fu->mutex);
return;
}
}
WARN_ON(1);
}
static void
fetchwarp_set_framedimensions(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h, bool unused)
{
u32 val;
val = FRAMEWIDTH(w) | FRAMEHEIGHT(h);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, FRAMEDIMENSIONS, val);
mutex_unlock(&fu->mutex);
}
void fetchwarp_rgb_constantcolor(struct dpu_fetchunit *fu,
u8 r, u8 g, u8 b, u8 a)
{
u32 val;
val = rgb_color(r, g, b, a);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONSTANTCOLOR(fu->id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchwarp_rgb_constantcolor);
void fetchwarp_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v)
{
u32 val;
val = yuv_color(y, u, v);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONSTANTCOLOR(fu->id), val);
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(fetchwarp_yuv_constantcolor);
static void fetchwarp_set_controltrigger(struct dpu_fetchunit *fu)
{
mutex_lock(&fu->mutex);
dpu_fu_write(fu, CONTROLTRIGGER, SHDTOKGEN);
mutex_unlock(&fu->mutex);
}
int fetchwarp_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type)
{
struct dpu_soc *dpu = fu->dpu;
u32 val;
mutex_lock(&fu->mutex);
val = dpu_fu_read(fu, FETCHTYPE);
val &= FETCHTYPE_MASK;
mutex_unlock(&fu->mutex);
switch (val) {
case FETCHTYPE__DECODE:
case FETCHTYPE__LAYER:
case FETCHTYPE__WARP:
case FETCHTYPE__ECO:
case FETCHTYPE__PERSP:
case FETCHTYPE__ROT:
case FETCHTYPE__DECODEL:
case FETCHTYPE__LAYERL:
case FETCHTYPE__ROTL:
break;
default:
dev_warn(dpu->dev, "Invalid fetch type %u for FetchWarp%d\n",
val, fu->id);
return -EINVAL;
}
*type = val;
return 0;
}
EXPORT_SYMBOL_GPL(fetchwarp_fetchtype);
struct dpu_fetchunit *dpu_fw_get(struct dpu_soc *dpu, int id)
{
struct dpu_fetchunit *fu;
int i;
for (i = 0; i < ARRAY_SIZE(fw_ids); i++)
if (fw_ids[i] == id)
break;
if (i == ARRAY_SIZE(fw_ids))
return ERR_PTR(-EINVAL);
fu = dpu->fw_priv[i];
mutex_lock(&fu->mutex);
if (fu->inuse) {
mutex_unlock(&fu->mutex);
return ERR_PTR(-EBUSY);
}
fu->inuse = true;
mutex_unlock(&fu->mutex);
return fu;
}
EXPORT_SYMBOL_GPL(dpu_fw_get);
void dpu_fw_put(struct dpu_fetchunit *fu)
{
mutex_lock(&fu->mutex);
fu->inuse = false;
mutex_unlock(&fu->mutex);
}
EXPORT_SYMBOL_GPL(dpu_fw_put);
static const struct dpu_fetchunit_ops fw_ops = {
.set_burstlength = fetchunit_set_burstlength,
.set_baseaddress = fetchunit_set_baseaddress,
.set_src_bpp = fetchunit_set_src_bpp,
.set_src_stride = fetchunit_set_src_stride,
.set_src_buf_dimensions = fetchwarp_set_src_buf_dimensions,
.set_fmt = fetchwarp_set_fmt,
.set_pixel_blend_mode = fetchunit_set_pixel_blend_mode,
.enable_src_buf = fetchunit_enable_src_buf,
.disable_src_buf = fetchunit_disable_src_buf,
.is_enabled = fetchunit_is_enabled,
.set_framedimensions = fetchwarp_set_framedimensions,
.set_controltrigger = fetchwarp_set_controltrigger,
.get_stream_id = fetchunit_get_stream_id,
.set_stream_id = fetchunit_set_stream_id,
.pin_off = fetchunit_pin_off,
.unpin_off = fetchunit_unpin_off,
.is_pinned_off = fetchunit_is_pinned_off,
};
void _dpu_fw_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_fetchunit *fu;
int i;
for (i = 0; i < ARRAY_SIZE(fw_ids); i++)
if (fw_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(fw_ids)))
return;
fu = dpu->fw_priv[i];
fetchunit_baddr_autoupdate(fu, 0x0);
fetchunit_shden(fu, true);
fetchunit_shdldreq_sticky(fu, 0xFF);
fetchunit_disable_src_buf(fu);
mutex_lock(&fu->mutex);
dpu_fu_write(fu, BURSTBUFFERMANAGEMENT,
SETNUMBUFFERS(16) | SETBURSTLENGTH(16));
mutex_unlock(&fu->mutex);
}
int dpu_fw_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_fetchwarp *fw;
struct dpu_fetchunit *fu;
int i, ret;
fw = devm_kzalloc(dpu->dev, sizeof(*fw), GFP_KERNEL);
if (!fw)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(fw_ids); i++)
if (fw_ids[i] == id)
break;
if (i == ARRAY_SIZE(fw_ids))
return -EINVAL;
fu = &fw->fu;
dpu->fw_priv[i] = fu;
fu->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
if (!fu->pec_base)
return -ENOMEM;
fu->base = devm_ioremap(dpu->dev, base, SZ_512);
if (!fu->base)
return -ENOMEM;
fu->dpu = dpu;
fu->id = id;
fu->sub_id = 0;
fu->type = FU_T_FW;
fu->ops = &fw_ops;
fu->name = "fetchwarp";
mutex_init(&fu->mutex);
ret = fetchwarp_fetchtype(fu, &fw->fetchtype);
if (ret < 0)
return ret;
_dpu_fw_init(dpu, id);
return 0;
}


@ -0,0 +1,586 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <drm/drm_mode.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define FGSTCTRL 0x8
#define FGSYNCMODE_MASK 0x6
#define HTCFG1 0xC
#define HTOTAL(n) ((((n) - 1) & 0x3FFF) << 16)
#define HACT(n) ((n) & 0x3FFF)
#define HTCFG2 0x10
#define HSEN BIT(31)
#define HSBP(n) ((((n) - 1) & 0x3FFF) << 16)
#define HSYNC(n) (((n) - 1) & 0x3FFF)
#define VTCFG1 0x14
#define VTOTAL(n) ((((n) - 1) & 0x3FFF) << 16)
#define VACT(n) ((n) & 0x3FFF)
#define VTCFG2 0x18
#define VSEN BIT(31)
#define VSBP(n) ((((n) - 1) & 0x3FFF) << 16)
#define VSYNC(n) (((n) - 1) & 0x3FFF)
#define INTCONFIG(n) (0x1C + 4 * (n))
#define EN BIT(31)
#define ROW(n) (((n) & 0x3FFF) << 16)
#define COL(n) ((n) & 0x3FFF)
#define PKICKCONFIG 0x2C
#define SKICKCONFIG 0x30
#define SECSTATCONFIG 0x34
#define FGSRCR1 0x38
#define FGSRCR2 0x3C
#define FGSRCR3 0x40
#define FGSRCR4 0x44
#define FGSRCR5 0x48
#define FGSRCR6 0x4C
#define FGKSDR 0x50
#define PACFG 0x54
#define STARTX(n) (((n) + 1) & 0x3FFF)
#define STARTY(n) ((((n) + 1) & 0x3FFF) << 16)
#define SACFG 0x58
#define FGINCTRL 0x5C
#define FGDM_MASK 0x7
#define ENPRIMALPHA BIT(3)
#define ENSECALPHA BIT(4)
#define FGINCTRLPANIC 0x60
#define FGCCR 0x64
#define CCALPHA(a) (((a) & 0x1) << 30)
#define CCRED(r) (((r) & 0x3FF) << 20)
#define CCGREEN(g) (((g) & 0x3FF) << 10)
#define CCBLUE(b) ((b) & 0x3FF)
#define FGENABLE 0x68
#define FGEN BIT(0)
#define FGSLR 0x6C
#define FGENSTS 0x70
#define ENSTS BIT(0)
#define FGTIMESTAMP 0x74
#define LINEINDEX_MASK 0x3FFF
#define LINEINDEX_SHIFT 0
#define FRAMEINDEX_MASK 0xFFFFC000
#define FRAMEINDEX_SHIFT 14
#define FGCHSTAT 0x78
#define SECSYNCSTAT BIT(24)
#define SFIFOEMPTY BIT(16)
#define FGCHSTATCLR 0x7C
#define CLRSECSTAT BIT(16)
#define FGSKEWMON 0x80
#define FGSFIFOMIN 0x84
#define FGSFIFOMAX 0x88
#define FGSFIFOFILLCLR 0x8C
#define FGSREPD 0x90
#define FGSRFTD 0x94
#define KHZ 1000
#define PLL_MIN_FREQ_HZ 648000000
struct dpu_framegen {
void __iomem *base;
struct clk *clk_pll;
struct clk *clk_bypass;
struct clk *clk_disp;
struct clk *clk_disp_lpcg;
struct mutex mutex;
int id;
unsigned int encoder_type;
bool inuse;
bool use_bypass_clk;
bool side_by_side;
struct dpu_soc *dpu;
};
static inline u32 dpu_fg_read(struct dpu_framegen *fg, unsigned int offset)
{
return readl(fg->base + offset);
}
static inline void dpu_fg_write(struct dpu_framegen *fg,
unsigned int offset, u32 value)
{
writel(value, fg->base + offset);
}
void framegen_enable(struct dpu_framegen *fg)
{
dpu_fg_write(fg, FGENABLE, FGEN);
}
EXPORT_SYMBOL_GPL(framegen_enable);
void framegen_disable(struct dpu_framegen *fg)
{
dpu_fg_write(fg, FGENABLE, 0);
}
EXPORT_SYMBOL_GPL(framegen_disable);
void framegen_enable_pixel_link(struct dpu_framegen *fg)
{
struct dpu_soc *dpu = fg->dpu;
const struct dpu_data *data = dpu->data;
if (!(data->has_dual_ldb && fg->encoder_type == DRM_MODE_ENCODER_LVDS))
dpu_pxlink_set_mst_enable(fg->dpu, fg->id, true);
}
EXPORT_SYMBOL_GPL(framegen_enable_pixel_link);
void framegen_disable_pixel_link(struct dpu_framegen *fg)
{
struct dpu_soc *dpu = fg->dpu;
const struct dpu_data *data = dpu->data;
if (!(data->has_dual_ldb && fg->encoder_type == DRM_MODE_ENCODER_LVDS))
dpu_pxlink_set_mst_enable(fg->dpu, fg->id, false);
}
EXPORT_SYMBOL_GPL(framegen_disable_pixel_link);
void framegen_shdtokgen(struct dpu_framegen *fg)
{
dpu_fg_write(fg, FGSLR, SHDTOKGEN);
}
EXPORT_SYMBOL_GPL(framegen_shdtokgen);
void framegen_syncmode(struct dpu_framegen *fg, fgsyncmode_t mode)
{
u32 val;
val = dpu_fg_read(fg, FGSTCTRL);
val &= ~FGSYNCMODE_MASK;
val |= mode;
dpu_fg_write(fg, FGSTCTRL, val);
dpu_pxlink_set_dc_sync_mode(fg->dpu, mode != FGSYNCMODE__OFF);
}
EXPORT_SYMBOL_GPL(framegen_syncmode);
void framegen_cfg_videomode(struct dpu_framegen *fg, struct drm_display_mode *m,
bool side_by_side, unsigned int encoder_type)
{
struct dpu_soc *dpu = fg->dpu;
u32 hact, htotal, hsync, hsbp;
u32 vact, vtotal, vsync, vsbp;
u32 kick_row, kick_col;
u32 val;
unsigned long disp_clock_rate, pll_clock_rate = 0;
int div = 0;
fg->side_by_side = side_by_side;
fg->encoder_type = encoder_type;
hact = m->crtc_hdisplay;
htotal = m->crtc_htotal;
hsync = m->crtc_hsync_end - m->crtc_hsync_start;
hsbp = m->crtc_htotal - m->crtc_hsync_start;
if (side_by_side) {
hact /= 2;
htotal /= 2;
hsync /= 2;
hsbp /= 2;
}
vact = m->crtc_vdisplay;
vtotal = m->crtc_vtotal;
vsync = m->crtc_vsync_end - m->crtc_vsync_start;
vsbp = m->crtc_vtotal - m->crtc_vsync_start;
/* video mode */
dpu_fg_write(fg, HTCFG1, HACT(hact) | HTOTAL(htotal));
dpu_fg_write(fg, HTCFG2, HSYNC(hsync) | HSBP(hsbp) | HSEN);
dpu_fg_write(fg, VTCFG1, VACT(vact) | VTOTAL(vtotal));
dpu_fg_write(fg, VTCFG2, VSYNC(vsync) | VSBP(vsbp) | VSEN);
kick_col = hact + 1;
kick_row = vact;
/*
 * As a slave, FrameGen needs to be kicked one line later
 * than the master.
 */
if (side_by_side && framegen_is_slave(fg))
kick_row++;
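/*
 * The kick point sits presumably just past the active area
 * (column hact + 1, row vact).
 */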
/* pkickconfig */
dpu_fg_write(fg, PKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
/* skickconfig */
dpu_fg_write(fg, SKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
/* primary and secondary area position config */
dpu_fg_write(fg, PACFG, STARTX(0) | STARTY(0));
dpu_fg_write(fg, SACFG, STARTX(0) | STARTY(0));
/* alpha */
val = dpu_fg_read(fg, FGINCTRL);
val &= ~(ENPRIMALPHA | ENSECALPHA);
dpu_fg_write(fg, FGINCTRL, val);
val = dpu_fg_read(fg, FGINCTRLPANIC);
val &= ~(ENPRIMALPHA | ENSECALPHA);
dpu_fg_write(fg, FGINCTRLPANIC, val);
/* constant color */
dpu_fg_write(fg, FGCCR, 0);
disp_clock_rate = m->crtc_clock * 1000;
if (encoder_type == DRM_MODE_ENCODER_TMDS) {
if (side_by_side)
dpu_pxlink_set_mst_addr(dpu, fg->id, fg->id ? 2 : 1);
else
dpu_pxlink_set_mst_addr(dpu, fg->id, 1);
clk_set_parent(fg->clk_disp, fg->clk_bypass);
fg->use_bypass_clk = true;
} else {
dpu_pxlink_set_mst_addr(dpu, fg->id, 0);
clk_set_parent(fg->clk_disp, fg->clk_pll);
/* find an even divisor for PLL */
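/*
 * e.g. a 148.5 MHz pixel clock: 297 MHz and 594 MHz are still below
 * the 648 MHz PLL minimum, so the loop settles on div = 6 and an
 * 891 MHz PLL rate, which the display clock then divides back down.
 */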
do {
div += 2;
pll_clock_rate = disp_clock_rate * div;
} while (pll_clock_rate < PLL_MIN_FREQ_HZ);
clk_set_rate(fg->clk_pll, pll_clock_rate);
clk_set_rate(fg->clk_disp, disp_clock_rate);
fg->use_bypass_clk = false;
}
}
EXPORT_SYMBOL_GPL(framegen_cfg_videomode);
void framegen_pkickconfig(struct dpu_framegen *fg, bool enable)
{
u32 val;
val = dpu_fg_read(fg, PKICKCONFIG);
if (enable)
val |= EN;
else
val &= ~EN;
dpu_fg_write(fg, PKICKCONFIG, val);
}
EXPORT_SYMBOL_GPL(framegen_pkickconfig);
void framegen_syncmode_fixup(struct dpu_framegen *fg, bool enable)
{
u32 val;
val = dpu_fg_read(fg, SECSTATCONFIG);
if (enable)
val |= BIT(7);
else
val &= ~BIT(7);
dpu_fg_write(fg, SECSTATCONFIG, val);
}
EXPORT_SYMBOL_GPL(framegen_syncmode_fixup);
void framegen_displaymode(struct dpu_framegen *fg, fgdm_t mode)
{
u32 val;
val = dpu_fg_read(fg, FGINCTRL);
val &= ~FGDM_MASK;
val |= mode;
dpu_fg_write(fg, FGINCTRL, val);
}
EXPORT_SYMBOL_GPL(framegen_displaymode);
void framegen_panic_displaymode(struct dpu_framegen *fg, fgdm_t mode)
{
u32 val;
val = dpu_fg_read(fg, FGINCTRLPANIC);
val &= ~FGDM_MASK;
val |= mode;
dpu_fg_write(fg, FGINCTRLPANIC, val);
}
EXPORT_SYMBOL_GPL(framegen_panic_displaymode);
void framegen_wait_done(struct dpu_framegen *fg, struct drm_display_mode *m)
{
unsigned long timeout, pending_framedur_jiffies;
int frame_size = m->crtc_htotal * m->crtc_vtotal;
int dotclock, pending_framedur_ns;
u32 val;
dotclock = clk_get_rate(fg->clk_disp) / KHZ;
if (dotclock == 0) {
/* fall back to display mode's clock */
dotclock = m->crtc_clock;
}
/*
 * According to the SoC designer, at most two frames can be pending
 * in the worst case, so waiting for three frame periods is enough.
 */
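/*
 * e.g. 1080p60 (2200 x 1125 total, 148500 kHz dot clock): one frame
 * is about 16.7 ms, so the wait here is bounded by roughly 50 ms.
 */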
pending_framedur_ns = div_u64((u64) 3 * frame_size * 1000000, dotclock);
pending_framedur_jiffies = nsecs_to_jiffies(pending_framedur_ns);
if (pending_framedur_jiffies > (3 * HZ)) {
pending_framedur_jiffies = 3 * HZ;
dev_warn(fg->dpu->dev,
"truncate FrameGen%d pending frame duration to 3sec\n",
fg->id);
}
timeout = jiffies + pending_framedur_jiffies;
do {
val = dpu_fg_read(fg, FGENSTS);
} while ((val & ENSTS) && time_before(jiffies, timeout));
dev_dbg(fg->dpu->dev, "FrameGen%d pending frame duration is %ums\n",
fg->id, jiffies_to_msecs(pending_framedur_jiffies));
if (val & ENSTS)
dev_err(fg->dpu->dev, "failed to wait for FrameGen%d done\n",
fg->id);
}
EXPORT_SYMBOL_GPL(framegen_wait_done);
static inline u32 framegen_frame_index(u32 stamp)
{
return (stamp & FRAMEINDEX_MASK) >> FRAMEINDEX_SHIFT;
}
static inline u32 framegen_line_index(u32 stamp)
{
return (stamp & LINEINDEX_MASK) >> LINEINDEX_SHIFT;
}
void framegen_read_timestamp(struct dpu_framegen *fg,
u32 *frame_index, u32 *line_index)
{
u32 stamp;
stamp = dpu_fg_read(fg, FGTIMESTAMP);
*frame_index = framegen_frame_index(stamp);
*line_index = framegen_line_index(stamp);
}
EXPORT_SYMBOL_GPL(framegen_read_timestamp);
void framegen_wait_for_frame_counter_moving(struct dpu_framegen *fg)
{
u32 frame_index, line_index, last_frame_index;
unsigned long timeout = jiffies + msecs_to_jiffies(50);
framegen_read_timestamp(fg, &frame_index, &line_index);
do {
last_frame_index = frame_index;
framegen_read_timestamp(fg, &frame_index, &line_index);
} while (last_frame_index == frame_index &&
time_before(jiffies, timeout));
if (last_frame_index == frame_index)
dev_err(fg->dpu->dev,
"failed to wait for FrameGen%d frame counter moving\n",
fg->id);
else
dev_dbg(fg->dpu->dev,
"FrameGen%d frame counter moves - last %u, curr %d\n",
fg->id, last_frame_index, frame_index);
}
EXPORT_SYMBOL_GPL(framegen_wait_for_frame_counter_moving);
bool framegen_secondary_requests_to_read_empty_fifo(struct dpu_framegen *fg)
{
u32 val;
bool empty;
val = dpu_fg_read(fg, FGCHSTAT);
empty = !!(val & SFIFOEMPTY);
if (empty)
dev_dbg(fg->dpu->dev,
"FrameGen%d secondary requests to read empty FIFO\n",
fg->id);
return empty;
}
EXPORT_SYMBOL_GPL(framegen_secondary_requests_to_read_empty_fifo);
void framegen_secondary_clear_channel_status(struct dpu_framegen *fg)
{
dpu_fg_write(fg, FGCHSTATCLR, CLRSECSTAT);
}
EXPORT_SYMBOL_GPL(framegen_secondary_clear_channel_status);
bool framegen_secondary_is_syncup(struct dpu_framegen *fg)
{
u32 val = dpu_fg_read(fg, FGCHSTAT);
return val & SECSYNCSTAT;
}
EXPORT_SYMBOL_GPL(framegen_secondary_is_syncup);
void framegen_wait_for_secondary_syncup(struct dpu_framegen *fg)
{
unsigned long timeout = jiffies + msecs_to_jiffies(50);
bool syncup;
do {
syncup = framegen_secondary_is_syncup(fg);
} while (!syncup && time_before(jiffies, timeout));
if (syncup)
dev_dbg(fg->dpu->dev, "FrameGen%d secondary syncup\n", fg->id);
else
dev_err(fg->dpu->dev,
"failed to wait for FrameGen%d secondary syncup\n",
fg->id);
}
EXPORT_SYMBOL_GPL(framegen_wait_for_secondary_syncup);
void framegen_enable_clock(struct dpu_framegen *fg)
{
if (!fg->use_bypass_clk)
clk_prepare_enable(fg->clk_pll);
clk_prepare_enable(fg->clk_disp);
clk_prepare_enable(fg->clk_disp_lpcg);
}
EXPORT_SYMBOL_GPL(framegen_enable_clock);
void framegen_disable_clock(struct dpu_framegen *fg)
{
if (!fg->use_bypass_clk)
clk_disable_unprepare(fg->clk_pll);
clk_disable_unprepare(fg->clk_disp);
clk_disable_unprepare(fg->clk_disp_lpcg);
}
EXPORT_SYMBOL_GPL(framegen_disable_clock);
bool framegen_is_master(struct dpu_framegen *fg)
{
const struct dpu_data *data = fg->dpu->data;
return fg->id == data->master_stream_id;
}
EXPORT_SYMBOL_GPL(framegen_is_master);
bool framegen_is_slave(struct dpu_framegen *fg)
{
return !framegen_is_master(fg);
}
EXPORT_SYMBOL_GPL(framegen_is_slave);
struct dpu_framegen *dpu_fg_get(struct dpu_soc *dpu, int id)
{
struct dpu_framegen *fg;
int i;
for (i = 0; i < ARRAY_SIZE(fg_ids); i++)
if (fg_ids[i] == id)
break;
if (i == ARRAY_SIZE(fg_ids))
return ERR_PTR(-EINVAL);
fg = dpu->fg_priv[i];
mutex_lock(&fg->mutex);
if (fg->inuse) {
mutex_unlock(&fg->mutex);
return ERR_PTR(-EBUSY);
}
fg->inuse = true;
mutex_unlock(&fg->mutex);
return fg;
}
EXPORT_SYMBOL_GPL(dpu_fg_get);
void dpu_fg_put(struct dpu_framegen *fg)
{
mutex_lock(&fg->mutex);
fg->inuse = false;
mutex_unlock(&fg->mutex);
}
EXPORT_SYMBOL_GPL(dpu_fg_put);
struct dpu_framegen *dpu_aux_fg_peek(struct dpu_framegen *fg)
{
return fg->dpu->fg_priv[fg->id ^ 1];
}
EXPORT_SYMBOL_GPL(dpu_aux_fg_peek);
void _dpu_fg_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_framegen *fg;
int i;
for (i = 0; i < ARRAY_SIZE(fg_ids); i++)
if (fg_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(fg_ids)))
return;
fg = dpu->fg_priv[i];
framegen_syncmode(fg, FGSYNCMODE__OFF);
}
int dpu_fg_init(struct dpu_soc *dpu, unsigned int id,
unsigned long unused, unsigned long base)
{
struct dpu_framegen *fg;
fg = devm_kzalloc(dpu->dev, sizeof(*fg), GFP_KERNEL);
if (!fg)
return -ENOMEM;
dpu->fg_priv[id] = fg;
fg->base = devm_ioremap(dpu->dev, base, SZ_256);
if (!fg->base)
return -ENOMEM;
fg->clk_pll = devm_clk_get(dpu->dev, id ? "pll1" : "pll0");
if (IS_ERR(fg->clk_pll))
return PTR_ERR(fg->clk_pll);
fg->clk_bypass = devm_clk_get(dpu->dev, "bypass0");
if (IS_ERR(fg->clk_bypass))
return PTR_ERR(fg->clk_bypass);
fg->clk_disp = devm_clk_get(dpu->dev, id ? "disp1" : "disp0");
if (IS_ERR(fg->clk_disp))
return PTR_ERR(fg->clk_disp);
fg->clk_disp_lpcg = devm_clk_get(dpu->dev, id ? "disp1_lpcg" : "disp0_lpcg");
if (IS_ERR(fg->clk_disp_lpcg))
return PTR_ERR(fg->clk_disp_lpcg);
fg->dpu = dpu;
fg->id = id;
mutex_init(&fg->mutex);
_dpu_fg_init(dpu, id);
return 0;
}


@ -0,0 +1,386 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define PIXENGCFG_DYNAMIC 0x8
#define PIXENGCFG_DYNAMIC_SRC_SEL_MASK 0x3F
#define SETUP1 0xC
#define SCALE_FACTOR_MASK 0xFFFFF
#define SCALE_FACTOR(n) ((n) & 0xFFFFF)
#define SETUP2 0x10
#define PHASE_OFFSET_MASK 0x1FFFFF
#define PHASE_OFFSET(n) ((n) & 0x1FFFFF)
#define CONTROL 0x14
#define OUTPUT_SIZE_MASK 0x3FFF0000
#define OUTPUT_SIZE(n) ((((n) - 1) << 16) & OUTPUT_SIZE_MASK)
#define FILTER_MODE 0x100
#define SCALE_MODE 0x10
#define MODE 0x1
static const hs_src_sel_t src_sels[3][6] = {
{
HS_SRC_SEL__DISABLE,
HS_SRC_SEL__FETCHDECODE0,
HS_SRC_SEL__MATRIX4,
HS_SRC_SEL__VSCALER4,
}, {
HS_SRC_SEL__DISABLE,
HS_SRC_SEL__FETCHDECODE1,
HS_SRC_SEL__MATRIX5,
HS_SRC_SEL__VSCALER5,
}, {
HS_SRC_SEL__DISABLE,
HS_SRC_SEL__MATRIX9,
HS_SRC_SEL__VSCALER9,
HS_SRC_SEL__FILTER9,
},
};
struct dpu_hscaler {
void __iomem *pec_base;
void __iomem *base;
struct mutex mutex;
int id;
bool inuse;
struct dpu_soc *dpu;
/* see DPU_PLANE_SRC_xxx */
unsigned int stream_id;
};
static inline u32 dpu_pec_hs_read(struct dpu_hscaler *hs,
unsigned int offset)
{
return readl(hs->pec_base + offset);
}
static inline void dpu_pec_hs_write(struct dpu_hscaler *hs,
unsigned int offset, u32 value)
{
writel(value, hs->pec_base + offset);
}
static inline u32 dpu_hs_read(struct dpu_hscaler *hs, unsigned int offset)
{
return readl(hs->base + offset);
}
static inline void dpu_hs_write(struct dpu_hscaler *hs,
unsigned int offset, u32 value)
{
writel(value, hs->base + offset);
}
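/*
 * Select the upstream unit feeding this HScaler via its PixEngCFG dynamic
 * register; only the sources listed in src_sels[] for this instance are
 * accepted.
 */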
int hscaler_pixengcfg_dynamic_src_sel(struct dpu_hscaler *hs, hs_src_sel_t src)
{
struct dpu_soc *dpu = hs->dpu;
const unsigned int hs_id_array[] = {4, 5, 9};
int i, j;
u32 val;
for (i = 0; i < ARRAY_SIZE(hs_id_array); i++)
if (hs_id_array[i] == hs->id)
break;
if (WARN_ON(i == ARRAY_SIZE(hs_id_array)))
return -EINVAL;
mutex_lock(&hs->mutex);
for (j = 0; j < ARRAY_SIZE(src_sels[0]); j++) {
if (src_sels[i][j] == src) {
val = dpu_pec_hs_read(hs, PIXENGCFG_DYNAMIC);
val &= ~PIXENGCFG_DYNAMIC_SRC_SEL_MASK;
val |= src;
dpu_pec_hs_write(hs, PIXENGCFG_DYNAMIC, val);
mutex_unlock(&hs->mutex);
return 0;
}
}
mutex_unlock(&hs->mutex);
dev_err(dpu->dev, "Invalid source for HScaler%d\n", hs->id);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(hscaler_pixengcfg_dynamic_src_sel);
void hscaler_pixengcfg_clken(struct dpu_hscaler *hs, pixengcfg_clken_t clken)
{
u32 val;
mutex_lock(&hs->mutex);
val = dpu_pec_hs_read(hs, PIXENGCFG_DYNAMIC);
val &= ~CLKEN_MASK;
val |= clken << CLKEN_MASK_SHIFT;
dpu_pec_hs_write(hs, PIXENGCFG_DYNAMIC, val);
mutex_unlock(&hs->mutex);
}
EXPORT_SYMBOL_GPL(hscaler_pixengcfg_clken);
void hscaler_shden(struct dpu_hscaler *hs, bool enable)
{
u32 val;
mutex_lock(&hs->mutex);
val = dpu_hs_read(hs, STATICCONTROL);
if (enable)
val |= SHDEN;
else
val &= ~SHDEN;
dpu_hs_write(hs, STATICCONTROL, val);
mutex_unlock(&hs->mutex);
}
EXPORT_SYMBOL_GPL(hscaler_shden);
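/*
 * Program the horizontal scale factor. 0x80000 encodes a 1:1 ratio, so
 * downscaling uses dst/src and upscaling uses src/dst.
 */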
void hscaler_setup1(struct dpu_hscaler *hs, u32 src, u32 dst)
{
struct dpu_soc *dpu = hs->dpu;
u32 scale_factor;
u64 tmp64;
if (src == dst) {
scale_factor = 0x80000;
} else {
if (src > dst) {
tmp64 = (u64)((u64)dst * 0x80000);
do_div(tmp64, src);
} else {
tmp64 = (u64)((u64)src * 0x80000);
do_div(tmp64, dst);
}
scale_factor = (u32)tmp64;
}
WARN_ON(scale_factor > 0x80000);
mutex_lock(&hs->mutex);
dpu_hs_write(hs, SETUP1, SCALE_FACTOR(scale_factor));
mutex_unlock(&hs->mutex);
dev_dbg(dpu->dev, "Hscaler%d scale factor 0x%08x\n",
hs->id, scale_factor);
}
EXPORT_SYMBOL_GPL(hscaler_setup1);
void hscaler_setup2(struct dpu_hscaler *hs, u32 phase_offset)
{
mutex_lock(&hs->mutex);
dpu_hs_write(hs, SETUP2, PHASE_OFFSET(phase_offset));
mutex_unlock(&hs->mutex);
}
EXPORT_SYMBOL_GPL(hscaler_setup2);
void hscaler_output_size(struct dpu_hscaler *hs, u32 line_num)
{
u32 val;
mutex_lock(&hs->mutex);
val = dpu_hs_read(hs, CONTROL);
val &= ~OUTPUT_SIZE_MASK;
val |= OUTPUT_SIZE(line_num);
dpu_hs_write(hs, CONTROL, val);
mutex_unlock(&hs->mutex);
}
EXPORT_SYMBOL_GPL(hscaler_output_size);
void hscaler_filter_mode(struct dpu_hscaler *hs, scaler_filter_mode_t m)
{
u32 val;
mutex_lock(&hs->mutex);
val = dpu_hs_read(hs, CONTROL);
val &= ~FILTER_MODE;
val |= m;
dpu_hs_write(hs, CONTROL, val);
mutex_unlock(&hs->mutex);
}
EXPORT_SYMBOL_GPL(hscaler_filter_mode);
void hscaler_scale_mode(struct dpu_hscaler *hs, scaler_scale_mode_t m)
{
u32 val;
mutex_lock(&hs->mutex);
val = dpu_hs_read(hs, CONTROL);
val &= ~SCALE_MODE;
val |= m;
dpu_hs_write(hs, CONTROL, val);
mutex_unlock(&hs->mutex);
}
EXPORT_SYMBOL_GPL(hscaler_scale_mode);
void hscaler_mode(struct dpu_hscaler *hs, scaler_mode_t m)
{
u32 val;
mutex_lock(&hs->mutex);
val = dpu_hs_read(hs, CONTROL);
val &= ~MODE;
val |= m;
dpu_hs_write(hs, CONTROL, val);
mutex_unlock(&hs->mutex);
}
EXPORT_SYMBOL_GPL(hscaler_mode);
bool hscaler_is_enabled(struct dpu_hscaler *hs)
{
u32 val;
mutex_lock(&hs->mutex);
val = dpu_hs_read(hs, CONTROL);
mutex_unlock(&hs->mutex);
return (val & MODE) == SCALER_ACTIVE;
}
EXPORT_SYMBOL_GPL(hscaler_is_enabled);
dpu_block_id_t hscaler_get_block_id(struct dpu_hscaler *hs)
{
switch (hs->id) {
case 4:
return ID_HSCALER4;
case 5:
return ID_HSCALER5;
case 9:
return ID_HSCALER9;
default:
WARN_ON(1);
}
return ID_NONE;
}
EXPORT_SYMBOL_GPL(hscaler_get_block_id);
unsigned int hscaler_get_stream_id(struct dpu_hscaler *hs)
{
return hs->stream_id;
}
EXPORT_SYMBOL_GPL(hscaler_get_stream_id);
void hscaler_set_stream_id(struct dpu_hscaler *hs, unsigned int id)
{
switch (id) {
case DPU_PLANE_SRC_TO_DISP_STREAM0:
case DPU_PLANE_SRC_TO_DISP_STREAM1:
case DPU_PLANE_SRC_DISABLED:
hs->stream_id = id;
break;
default:
WARN_ON(1);
}
}
EXPORT_SYMBOL_GPL(hscaler_set_stream_id);
struct dpu_hscaler *dpu_hs_get(struct dpu_soc *dpu, int id)
{
struct dpu_hscaler *hs;
int i;
for (i = 0; i < ARRAY_SIZE(hs_ids); i++)
if (hs_ids[i] == id)
break;
if (i == ARRAY_SIZE(hs_ids))
return ERR_PTR(-EINVAL);
hs = dpu->hs_priv[i];
mutex_lock(&hs->mutex);
if (hs->inuse) {
mutex_unlock(&hs->mutex);
return ERR_PTR(-EBUSY);
}
hs->inuse = true;
mutex_unlock(&hs->mutex);
return hs;
}
EXPORT_SYMBOL_GPL(dpu_hs_get);
void dpu_hs_put(struct dpu_hscaler *hs)
{
mutex_lock(&hs->mutex);
hs->inuse = false;
mutex_unlock(&hs->mutex);
}
EXPORT_SYMBOL_GPL(dpu_hs_put);
void _dpu_hs_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_hscaler *hs;
int i;
for (i = 0; i < ARRAY_SIZE(hs_ids); i++)
if (hs_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(hs_ids)))
return;
hs = dpu->hs_priv[i];
hscaler_shden(hs, true);
hscaler_setup2(hs, 0);
hscaler_pixengcfg_dynamic_src_sel(hs, HS_SRC_SEL__DISABLE);
}
int dpu_hs_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_hscaler *hs;
int i;
hs = devm_kzalloc(dpu->dev, sizeof(*hs), GFP_KERNEL);
if (!hs)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(hs_ids); i++)
if (hs_ids[i] == id)
break;
if (i == ARRAY_SIZE(hs_ids))
return -EINVAL;
dpu->hs_priv[i] = hs;
hs->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_8);
if (!hs->pec_base)
return -ENOMEM;
hs->base = devm_ioremap(dpu->dev, base, SZ_1K);
if (!hs->base)
return -ENOMEM;
hs->dpu = dpu;
hs->id = id;
mutex_init(&hs->mutex);
_dpu_hs_init(dpu, id);
return 0;
}


@ -0,0 +1,346 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <drm/drm_blend.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define PIXENGCFG_DYNAMIC 0x8
#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK 0x3F
#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK 0x3F00
#define PIXENGCFG_DYNAMIC_SEC_SEL_SHIFT 8
static const lb_prim_sel_t prim_sels[] = {
LB_PRIM_SEL__DISABLE,
LB_PRIM_SEL__BLITBLEND9,
LB_PRIM_SEL__CONSTFRAME0,
LB_PRIM_SEL__CONSTFRAME1,
LB_PRIM_SEL__CONSTFRAME4,
LB_PRIM_SEL__CONSTFRAME5,
LB_PRIM_SEL__MATRIX4,
LB_PRIM_SEL__HSCALER4,
LB_PRIM_SEL__VSCALER4,
LB_PRIM_SEL__MATRIX5,
LB_PRIM_SEL__HSCALER5,
LB_PRIM_SEL__VSCALER5,
LB_PRIM_SEL__LAYERBLEND0,
LB_PRIM_SEL__LAYERBLEND1,
LB_PRIM_SEL__LAYERBLEND2,
LB_PRIM_SEL__LAYERBLEND3,
};
#define PIXENGCFG_STATUS 0xC
#define SHDTOKSEL (0x3 << 3)
#define SHDTOKSEL_SHIFT 3
#define SHDLDSEL (0x3 << 1)
#define SHDLDSEL_SHIFT 1
#define CONTROL 0xC
#define OPERATION_MODE_MASK BIT(0)
#define BLENDCONTROL 0x10
#define ALPHA(a) (((a) & 0xFF) << 16)
#define PRIM_C_BLD_FUNC__ONE_MINUS_CONST_ALPHA 0x7
#define PRIM_C_BLD_FUNC__ONE_MINUS_SEC_ALPHA 0x5
#define PRIM_C_BLD_FUNC__ZERO 0x0
#define SEC_C_BLD_FUNC__CONST_ALPHA (0x6 << 4)
#define SEC_C_BLD_FUNC__SEC_ALPHA (0x4 << 4)
#define PRIM_A_BLD_FUNC__ZERO (0x0 << 8)
#define SEC_A_BLD_FUNC__ZERO (0x0 << 12)
#define POSITION 0x14
#define XPOS(x) ((x) & 0x7FFF)
#define YPOS(y) (((y) & 0x7FFF) << 16)
#define PRIMCONTROLWORD 0x18
#define SECCONTROLWORD 0x1C
struct dpu_layerblend {
void __iomem *pec_base;
void __iomem *base;
struct mutex mutex;
int id;
bool inuse;
struct dpu_soc *dpu;
};
static inline u32 dpu_pec_lb_read(struct dpu_layerblend *lb,
unsigned int offset)
{
return readl(lb->pec_base + offset);
}
static inline void dpu_pec_lb_write(struct dpu_layerblend *lb,
unsigned int offset, u32 value)
{
writel(value, lb->pec_base + offset);
}
static inline u32 dpu_lb_read(struct dpu_layerblend *lb, unsigned int offset)
{
return readl(lb->base + offset);
}
static inline void dpu_lb_write(struct dpu_layerblend *lb,
unsigned int offset, u32 value)
{
writel(value, lb->base + offset);
}
int layerblend_pixengcfg_dynamic_prim_sel(struct dpu_layerblend *lb,
lb_prim_sel_t prim)
{
struct dpu_soc *dpu = lb->dpu;
int fixed_sels_num = ARRAY_SIZE(prim_sels) - 4;
int i;
u32 val;
mutex_lock(&lb->mutex);
for (i = 0; i < fixed_sels_num + lb->id; i++) {
if (prim_sels[i] == prim) {
val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC);
val &= ~PIXENGCFG_DYNAMIC_PRIM_SEL_MASK;
val |= prim;
dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, val);
mutex_unlock(&lb->mutex);
return 0;
}
}
mutex_unlock(&lb->mutex);
dev_err(dpu->dev, "Invalid primary source for LayerBlend%d\n", lb->id);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(layerblend_pixengcfg_dynamic_prim_sel);
void layerblend_pixengcfg_dynamic_sec_sel(struct dpu_layerblend *lb,
lb_sec_sel_t sec)
{
u32 val;
mutex_lock(&lb->mutex);
val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC);
val &= ~PIXENGCFG_DYNAMIC_SEC_SEL_MASK;
val |= sec << PIXENGCFG_DYNAMIC_SEC_SEL_SHIFT;
dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, val);
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(layerblend_pixengcfg_dynamic_sec_sel);
void layerblend_pixengcfg_clken(struct dpu_layerblend *lb,
pixengcfg_clken_t clken)
{
u32 val;
mutex_lock(&lb->mutex);
val = dpu_pec_lb_read(lb, PIXENGCFG_DYNAMIC);
val &= ~CLKEN_MASK;
val |= clken << CLKEN_MASK_SHIFT;
dpu_pec_lb_write(lb, PIXENGCFG_DYNAMIC, val);
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(layerblend_pixengcfg_clken);
void layerblend_shden(struct dpu_layerblend *lb, bool enable)
{
u32 val;
mutex_lock(&lb->mutex);
val = dpu_lb_read(lb, STATICCONTROL);
if (enable)
val |= SHDEN;
else
val &= ~SHDEN;
dpu_lb_write(lb, STATICCONTROL, val);
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(layerblend_shden);
void layerblend_shdtoksel(struct dpu_layerblend *lb, lb_shadow_sel_t sel)
{
u32 val;
mutex_lock(&lb->mutex);
val = dpu_lb_read(lb, STATICCONTROL);
val &= ~SHDTOKSEL;
val |= (sel << SHDTOKSEL_SHIFT);
dpu_lb_write(lb, STATICCONTROL, val);
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(layerblend_shdtoksel);
void layerblend_shdldsel(struct dpu_layerblend *lb, lb_shadow_sel_t sel)
{
u32 val;
mutex_lock(&lb->mutex);
val = dpu_lb_read(lb, STATICCONTROL);
val &= ~SHDLDSEL;
val |= (sel << SHDLDSEL_SHIFT);
dpu_lb_write(lb, STATICCONTROL, val);
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(layerblend_shdldsel);
void layerblend_control(struct dpu_layerblend *lb, lb_mode_t mode)
{
u32 val;
mutex_lock(&lb->mutex);
val = dpu_lb_read(lb, CONTROL);
val &= ~OPERATION_MODE_MASK;
val |= mode;
dpu_lb_write(lb, CONTROL, val);
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(layerblend_control);
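/*
 * Program per-layer alpha blending. zpos 0 is the bottom layer and is
 * always treated as opaque; other layers blend according to the DRM pixel
 * blend mode and the 16-bit constant alpha (truncated to 8 bits).
 */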
void layerblend_blendcontrol(struct dpu_layerblend *lb, unsigned int zpos,
unsigned int pixel_blend_mode, u16 alpha)
{
u32 val = PRIM_A_BLD_FUNC__ZERO | SEC_A_BLD_FUNC__ZERO;
if (zpos == 0) {
val |= PRIM_C_BLD_FUNC__ZERO | SEC_C_BLD_FUNC__CONST_ALPHA;
alpha = DRM_BLEND_ALPHA_OPAQUE;
} else {
switch (pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
val |= PRIM_C_BLD_FUNC__ONE_MINUS_CONST_ALPHA |
SEC_C_BLD_FUNC__CONST_ALPHA;
break;
case DRM_MODE_BLEND_PREMULTI:
val |= PRIM_C_BLD_FUNC__ONE_MINUS_SEC_ALPHA |
SEC_C_BLD_FUNC__CONST_ALPHA;
break;
case DRM_MODE_BLEND_COVERAGE:
val |= PRIM_C_BLD_FUNC__ONE_MINUS_SEC_ALPHA |
SEC_C_BLD_FUNC__SEC_ALPHA;
break;
default:
break;
}
}
val |= ALPHA(alpha >> 8);
mutex_lock(&lb->mutex);
dpu_lb_write(lb, BLENDCONTROL, val);
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(layerblend_blendcontrol);
void layerblend_position(struct dpu_layerblend *lb, int x, int y)
{
mutex_lock(&lb->mutex);
dpu_lb_write(lb, POSITION, XPOS(x) | YPOS(y));
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(layerblend_position);
struct dpu_layerblend *dpu_lb_get(struct dpu_soc *dpu, int id)
{
struct dpu_layerblend *lb;
int i;
for (i = 0; i < ARRAY_SIZE(lb_ids); i++)
if (lb_ids[i] == id)
break;
if (i == ARRAY_SIZE(lb_ids))
return ERR_PTR(-EINVAL);
lb = dpu->lb_priv[i];
mutex_lock(&lb->mutex);
if (lb->inuse) {
mutex_unlock(&lb->mutex);
return ERR_PTR(-EBUSY);
}
lb->inuse = true;
mutex_unlock(&lb->mutex);
return lb;
}
EXPORT_SYMBOL_GPL(dpu_lb_get);
void dpu_lb_put(struct dpu_layerblend *lb)
{
mutex_lock(&lb->mutex);
lb->inuse = false;
mutex_unlock(&lb->mutex);
}
EXPORT_SYMBOL_GPL(dpu_lb_put);
void _dpu_lb_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_layerblend *lb;
int i;
for (i = 0; i < ARRAY_SIZE(lb_ids); i++)
if (lb_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(lb_ids)))
return;
lb = dpu->lb_priv[i];
layerblend_pixengcfg_dynamic_prim_sel(lb, LB_PRIM_SEL__DISABLE);
layerblend_pixengcfg_dynamic_sec_sel(lb, LB_SEC_SEL__DISABLE);
layerblend_pixengcfg_clken(lb, CLKEN__AUTOMATIC);
layerblend_shdldsel(lb, BOTH);
layerblend_shdtoksel(lb, BOTH);
layerblend_shden(lb, true);
}
int dpu_lb_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_layerblend *lb;
int ret;
lb = devm_kzalloc(dpu->dev, sizeof(*lb), GFP_KERNEL);
if (!lb)
return -ENOMEM;
dpu->lb_priv[id] = lb;
lb->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_16);
if (!lb->pec_base)
return -ENOMEM;
lb->base = devm_ioremap(dpu->dev, base, SZ_32);
if (!lb->base)
return -ENOMEM;
lb->dpu = dpu;
lb->id = id;
mutex_init(&lb->mutex);
ret = layerblend_pixengcfg_dynamic_prim_sel(lb, LB_PRIM_SEL__DISABLE);
if (ret < 0)
return ret;
_dpu_lb_init(dpu, id);
return 0;
}


@ -0,0 +1,445 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef __DPU_PRV_H__
#define __DPU_PRV_H__
#include <linux/firmware/imx/sci.h>
#include <drm/drm_fourcc.h>
#include <video/dpu.h>
#define STATICCONTROL 0x8
#define SHDLDREQSTICKY(lm) (((lm) & 0xFF) << 24)
#define SHDLDREQSTICKY_MASK (0xFF << 24)
#define BASEADDRESSAUTOUPDATE(lm) (((lm) & 0xFF) << 16)
#define BASEADDRESSAUTOUPDATE_MASK (0xFF << 16)
#define SHDEN BIT(0)
#define BURSTBUFFERMANAGEMENT 0xC
#define SETNUMBUFFERS(n) ((n) & 0xFF)
#define SETBURSTLENGTH(n) (((n) & 0x1F) << 8)
#define SETBURSTLENGTH_MASK 0x1F00
#define LINEMODE_MASK 0x80000000U
#define LINEMODE_SHIFT 31U
enum linemode {
/*
* Mandatory setting for operation in the Display Controller.
 * Also works for the Blit Engine with a marginal performance impact.
*/
LINEMODE__DISPLAY = 0,
/* Recommended setting for operation in the Blit Engine. */
LINEMODE__BLIT = 1 << LINEMODE_SHIFT,
};
#define BITSPERPIXEL(bpp) (((bpp) & 0x3F) << 16)
#define STRIDE(n) (((n) - 1) & 0xFFFF)
#define LINEWIDTH(w) (((w) - 1) & 0x3FFF)
#define LINECOUNT(h) ((((h) - 1) & 0x3FFF) << 16)
#define ITUFORMAT BIT(31)
#define R_BITS(n) (((n) & 0xF) << 24)
#define G_BITS(n) (((n) & 0xF) << 16)
#define B_BITS(n) (((n) & 0xF) << 8)
#define A_BITS(n) ((n) & 0xF)
#define R_SHIFT(n) (((n) & 0x1F) << 24)
#define G_SHIFT(n) (((n) & 0x1F) << 16)
#define B_SHIFT(n) (((n) & 0x1F) << 8)
#define A_SHIFT(n) ((n) & 0x1F)
#define Y_BITS(n) R_BITS(n)
#define Y_BITS_MASK 0xF000000
#define U_BITS(n) G_BITS(n)
#define U_BITS_MASK 0xF0000
#define V_BITS(n) B_BITS(n)
#define V_BITS_MASK 0xF00
#define Y_SHIFT(n) R_SHIFT(n)
#define Y_SHIFT_MASK 0x1F000000
#define U_SHIFT(n) G_SHIFT(n)
#define U_SHIFT_MASK 0x1F0000
#define V_SHIFT(n) B_SHIFT(n)
#define V_SHIFT_MASK 0x1F00
#define LAYERXOFFSET(x) ((x) & 0x7FFF)
#define LAYERYOFFSET(y) (((y) & 0x7FFF) << 16)
#define CLIPWINDOWXOFFSET(x) ((x) & 0x7FFF)
#define CLIPWINDOWYOFFSET(y) (((y) & 0x7FFF) << 16)
#define CLIPWINDOWWIDTH(w) (((w) - 1) & 0x3FFF)
#define CLIPWINDOWHEIGHT(h) ((((h) - 1) & 0x3FFF) << 16)
#define CONSTANTALPHA_MASK 0xFF
#define CONSTANTALPHA(n) ((n) & CONSTANTALPHA_MASK)
#define PALETTEENABLE BIT(0)
typedef enum {
TILE_FILL_ZERO,
TILE_FILL_CONSTANT,
TILE_PAD,
TILE_PAD_ZERO,
} tilemode_t;
#define ALPHASRCENABLE BIT(8)
#define ALPHACONSTENABLE BIT(9)
#define ALPHAMASKENABLE BIT(10)
#define ALPHATRANSENABLE BIT(11)
#define ALPHA_ENABLE_MASK (ALPHASRCENABLE | ALPHACONSTENABLE | \
ALPHAMASKENABLE | ALPHATRANSENABLE)
#define RGBALPHASRCENABLE BIT(12)
#define RGBALPHACONSTENABLE BIT(13)
#define RGBALPHAMASKENABLE BIT(14)
#define RGBALPHATRANSENABLE BIT(15)
#define RGB_ENABLE_MASK (RGBALPHASRCENABLE | \
RGBALPHACONSTENABLE | \
RGBALPHAMASKENABLE | \
RGBALPHATRANSENABLE)
#define PREMULCONSTRGB BIT(16)
typedef enum {
YUVCONVERSIONMODE__OFF,
YUVCONVERSIONMODE__ITU601,
YUVCONVERSIONMODE__ITU601_FR,
YUVCONVERSIONMODE__ITU709,
} yuvconversionmode_t;
#define YUVCONVERSIONMODE_MASK 0x60000
#define YUVCONVERSIONMODE(m) (((m) & 0x3) << 17)
#define GAMMAREMOVEENABLE BIT(20)
#define CLIPWINDOWENABLE BIT(30)
#define SOURCEBUFFERENABLE BIT(31)
#define EMPTYFRAME BIT(31)
#define FRAMEWIDTH(w) (((w) - 1) & 0x3FFF)
#define FRAMEHEIGHT(h) ((((h) - 1) & 0x3FFF) << 16)
#define DELTAX_MASK 0x3F000
#define DELTAY_MASK 0xFC0000
#define DELTAX(x) (((x) & 0x3F) << 12)
#define DELTAY(y) (((y) & 0x3F) << 18)
#define YUV422UPSAMPLINGMODE_MASK BIT(5)
#define YUV422UPSAMPLINGMODE(m) (((m) & 0x1) << 5)
typedef enum {
YUV422UPSAMPLINGMODE__REPLICATE,
YUV422UPSAMPLINGMODE__INTERPOLATE,
} yuv422upsamplingmode_t;
#define INPUTSELECT_MASK 0x18
#define INPUTSELECT(s) (((s) & 0x3) << 3)
typedef enum {
INPUTSELECT__INACTIVE,
INPUTSELECT__COMPPACK,
INPUTSELECT__ALPHAMASK,
INPUTSELECT__COORDINATE,
} inputselect_t;
#define RASTERMODE_MASK 0x7
#define RASTERMODE(m) ((m) & 0x7)
typedef enum {
RASTERMODE__NORMAL,
RASTERMODE__DECODE,
RASTERMODE__ARBITRARY,
RASTERMODE__PERSPECTIVE,
RASTERMODE__YUV422,
RASTERMODE__AFFINE,
} rastermode_t;
#define SHDTOKGEN BIT(0)
#define FETCHTYPE_MASK 0xF
#define DPU_FRAC_PLANE_LAYER_NUM 8
#define DPU_VPROC_CAP_HSCALER4 BIT(0)
#define DPU_VPROC_CAP_VSCALER4 BIT(1)
#define DPU_VPROC_CAP_HSCALER5 BIT(2)
#define DPU_VPROC_CAP_VSCALER5 BIT(3)
#define DPU_VPROC_CAP_FETCHECO0 BIT(4)
#define DPU_VPROC_CAP_FETCHECO1 BIT(5)
#define DPU_VPROC_CAP_HSCALE (DPU_VPROC_CAP_HSCALER4 | \
DPU_VPROC_CAP_HSCALER5)
#define DPU_VPROC_CAP_VSCALE (DPU_VPROC_CAP_VSCALER4 | \
DPU_VPROC_CAP_VSCALER5)
#define DPU_VPROC_CAP_FETCHECO (DPU_VPROC_CAP_FETCHECO0 | \
DPU_VPROC_CAP_FETCHECO1)
struct dpu_unit {
char *name;
unsigned int num;
const unsigned int *ids;
const unsigned long *pec_ofss; /* PixEngCFG */
const unsigned long *ofss;
const unsigned int *dprc_ids;
};
struct cm_reg_ofs {
u32 ipidentifier;
u32 lockunlock;
u32 lockstatus;
u32 userinterruptmask;
u32 interruptenable;
u32 interruptpreset;
u32 interruptclear;
u32 interruptstatus;
u32 userinterruptenable;
u32 userinterruptpreset;
u32 userinterruptclear;
u32 userinterruptstatus;
u32 generalpurpose;
};
struct dpu_data {
unsigned long cm_ofs; /* common */
const struct dpu_unit *cfs;
const struct dpu_unit *decs;
const struct dpu_unit *eds;
const struct dpu_unit *fds;
const struct dpu_unit *fes;
const struct dpu_unit *fgs;
const struct dpu_unit *fls;
const struct dpu_unit *fws;
const struct dpu_unit *hss;
const struct dpu_unit *lbs;
const struct dpu_unit *sts;
const struct dpu_unit *tcons;
const struct dpu_unit *vss;
const struct cm_reg_ofs *cm_reg_ofs;
const unsigned long *unused_irq;
unsigned int syncmode_min_prate; /* need pixel combiner, KHz */
unsigned int singlemode_max_width;
unsigned int master_stream_id;
u32 plane_src_mask;
bool has_dual_ldb;
};
struct dpu_soc {
struct device *dev;
const struct dpu_data *data;
spinlock_t lock;
struct list_head list;
struct device *pd_dc_dev;
struct device *pd_pll0_dev;
struct device *pd_pll1_dev;
struct device_link *pd_dc_link;
struct device_link *pd_pll0_link;
struct device_link *pd_pll1_link;
void __iomem *cm_reg;
int id;
int usecount;
int irq_extdst0_shdload;
int irq_extdst4_shdload;
int irq_extdst1_shdload;
int irq_extdst5_shdload;
int irq_disengcfg_shdload0;
int irq_disengcfg_framecomplete0;
int irq_disengcfg_shdload1;
int irq_disengcfg_framecomplete1;
int irq_line_num;
struct irq_domain *domain;
struct imx_sc_ipc *dpu_ipc_handle;
struct dpu_constframe *cf_priv[4];
struct dpu_disengcfg *dec_priv[2];
struct dpu_extdst *ed_priv[4];
struct dpu_fetchunit *fd_priv[2];
struct dpu_fetchunit *fe_priv[4];
struct dpu_framegen *fg_priv[2];
struct dpu_fetchunit *fl_priv[1];
struct dpu_fetchunit *fw_priv[1];
struct dpu_hscaler *hs_priv[3];
struct dpu_layerblend *lb_priv[4];
struct dpu_store *st_priv[1];
struct dpu_tcon *tcon_priv[2];
struct dpu_vscaler *vs_priv[3];
};
int dpu_format_horz_chroma_subsampling(u32 format);
int dpu_format_vert_chroma_subsampling(u32 format);
int dpu_format_num_planes(u32 format);
int dpu_format_plane_width(int width, u32 format, int plane);
int dpu_format_plane_height(int height, u32 format, int plane);
#define _DECLARE_DPU_UNIT_INIT_FUNC(block) \
void _dpu_##block##_init(struct dpu_soc *dpu, unsigned int id)

_DECLARE_DPU_UNIT_INIT_FUNC(cf);
_DECLARE_DPU_UNIT_INIT_FUNC(dec);
_DECLARE_DPU_UNIT_INIT_FUNC(ed);
_DECLARE_DPU_UNIT_INIT_FUNC(fd);
_DECLARE_DPU_UNIT_INIT_FUNC(fe);
_DECLARE_DPU_UNIT_INIT_FUNC(fg);
_DECLARE_DPU_UNIT_INIT_FUNC(fl);
_DECLARE_DPU_UNIT_INIT_FUNC(fw);
_DECLARE_DPU_UNIT_INIT_FUNC(hs);
_DECLARE_DPU_UNIT_INIT_FUNC(lb);
_DECLARE_DPU_UNIT_INIT_FUNC(st);
_DECLARE_DPU_UNIT_INIT_FUNC(tcon);
_DECLARE_DPU_UNIT_INIT_FUNC(vs);
#define DECLARE_DPU_UNIT_INIT_FUNC(block) \
int dpu_##block##_init(struct dpu_soc *dpu, unsigned int id, \
unsigned long pec_base, unsigned long base)
DECLARE_DPU_UNIT_INIT_FUNC(cf);
DECLARE_DPU_UNIT_INIT_FUNC(dec);
DECLARE_DPU_UNIT_INIT_FUNC(ed);
DECLARE_DPU_UNIT_INIT_FUNC(fd);
DECLARE_DPU_UNIT_INIT_FUNC(fe);
DECLARE_DPU_UNIT_INIT_FUNC(fg);
DECLARE_DPU_UNIT_INIT_FUNC(fl);
DECLARE_DPU_UNIT_INIT_FUNC(fw);
DECLARE_DPU_UNIT_INIT_FUNC(hs);
DECLARE_DPU_UNIT_INIT_FUNC(lb);
DECLARE_DPU_UNIT_INIT_FUNC(st);
DECLARE_DPU_UNIT_INIT_FUNC(tcon);
DECLARE_DPU_UNIT_INIT_FUNC(vs);
static inline u32 dpu_pec_fu_read(struct dpu_fetchunit *fu, unsigned int offset)
{
return readl(fu->pec_base + offset);
}
static inline void dpu_pec_fu_write(struct dpu_fetchunit *fu,
unsigned int offset, u32 value)
{
writel(value, fu->pec_base + offset);
}
static inline u32 dpu_fu_read(struct dpu_fetchunit *fu, unsigned int offset)
{
return readl(fu->base + offset);
}
static inline void dpu_fu_write(struct dpu_fetchunit *fu,
unsigned int offset, u32 value)
{
writel(value, fu->base + offset);
}
static inline u32 rgb_color(u8 r, u8 g, u8 b, u8 a)
{
return (r << 24) | (g << 16) | (b << 8) | a;
}
static inline u32 yuv_color(u8 y, u8 u, u8 v)
{
return (y << 24) | (u << 16) | (v << 8);
}
void tcon_get_pc(struct dpu_tcon *tcon, void *data);
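/* Valid hardware instance IDs for each DPU sub-unit type. */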
static const unsigned int cf_ids[] = {0, 1, 4, 5};
static const unsigned int dec_ids[] = {0, 1};
static const unsigned int ed_ids[] = {0, 1, 4, 5};
static const unsigned int fd_ids[] = {0, 1};
static const unsigned int fe_ids[] = {0, 1, 2, 9};
static const unsigned int fg_ids[] = {0, 1};
static const unsigned int fl_ids[] = {0};
static const unsigned int fw_ids[] = {2};
static const unsigned int hs_ids[] = {4, 5, 9};
static const unsigned int lb_ids[] = {0, 1, 2, 3};
static const unsigned int st_ids[] = {9};
static const unsigned int tcon_ids[] = {0, 1};
static const unsigned int vs_ids[] = {4, 5, 9};
static const unsigned int fd_dprc_ids[] = {3, 4};
static const unsigned int fl_dprc_ids[] = {2};
static const unsigned int fw_dprc_ids[] = {5};
struct dpu_pixel_format {
u32 pixel_format;
u32 bits;
u32 shift;
};
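/* Color component bit widths and bit positions for each supported pixel format. */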
static const struct dpu_pixel_format dpu_pixel_format_matrix[] = {
{
DRM_FORMAT_ARGB8888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8),
R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(24),
}, {
DRM_FORMAT_XRGB8888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
}, {
DRM_FORMAT_ABGR8888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8),
R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(24),
}, {
DRM_FORMAT_XBGR8888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(0),
}, {
DRM_FORMAT_RGBA8888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8),
R_SHIFT(24) | G_SHIFT(16) | B_SHIFT(8) | A_SHIFT(0),
}, {
DRM_FORMAT_RGBX8888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
R_SHIFT(24) | G_SHIFT(16) | B_SHIFT(8) | A_SHIFT(0),
}, {
DRM_FORMAT_BGRA8888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(8),
R_SHIFT(8) | G_SHIFT(16) | B_SHIFT(24) | A_SHIFT(0),
}, {
DRM_FORMAT_BGRX8888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
R_SHIFT(8) | G_SHIFT(16) | B_SHIFT(24) | A_SHIFT(0),
}, {
DRM_FORMAT_RGB888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
}, {
DRM_FORMAT_BGR888,
R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
R_SHIFT(0) | G_SHIFT(8) | B_SHIFT(16) | A_SHIFT(0),
}, {
DRM_FORMAT_RGB565,
R_BITS(5) | G_BITS(6) | B_BITS(5) | A_BITS(0),
R_SHIFT(11) | G_SHIFT(5) | B_SHIFT(0) | A_SHIFT(0),
}, {
DRM_FORMAT_YUYV,
Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(8) | A_SHIFT(0),
}, {
DRM_FORMAT_UYVY,
Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
Y_SHIFT(8) | U_SHIFT(0) | V_SHIFT(0) | A_SHIFT(0),
}, {
DRM_FORMAT_NV12,
Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0),
}, {
DRM_FORMAT_NV21,
Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0),
}, {
DRM_FORMAT_NV16,
Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0),
}, {
DRM_FORMAT_NV61,
Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0),
}, {
DRM_FORMAT_NV24,
Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
Y_SHIFT(0) | U_SHIFT(0) | V_SHIFT(8) | A_SHIFT(0),
}, {
DRM_FORMAT_NV42,
Y_BITS(8) | U_BITS(8) | V_BITS(8) | A_BITS(0),
Y_SHIFT(0) | U_SHIFT(8) | V_SHIFT(0) | A_SHIFT(0),
},
};
int dpu_sc_misc_get_handle(struct dpu_soc *dpu);
int dpu_pxlink_set_mst_addr(struct dpu_soc *dpu, int disp_id, u32 val);
int dpu_pxlink_set_mst_enable(struct dpu_soc *dpu, int disp_id, bool enable);
int dpu_pxlink_set_mst_valid(struct dpu_soc *dpu, int disp_id, bool enable);
int dpu_pxlink_set_sync_ctrl(struct dpu_soc *dpu, int disp_id, bool enable);
int dpu_pxlink_set_dc_sync_mode(struct dpu_soc *dpu, bool enable);
int dpu_sc_misc_init(struct dpu_soc *dpu);
#endif /* __DPU_PRV_H__ */


@ -0,0 +1,93 @@
/*
* Copyright 2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <dt-bindings/firmware/imx/rsrc.h>
#include "dpu-prv.h"
static inline int
dpu_sc_misc_set_ctrl(struct dpu_soc *dpu, u32 rsc, u8 ctrl, u32 val)
{
return imx_sc_misc_set_control(dpu->dpu_ipc_handle, rsc, ctrl, val);
}
int dpu_sc_misc_get_handle(struct dpu_soc *dpu)
{
return imx_scu_get_handle(&dpu->dpu_ipc_handle);
}
int dpu_pxlink_set_mst_addr(struct dpu_soc *dpu, int disp_id, u32 val)
{
u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
u8 ctrl = disp_id ?
IMX_SC_C_PXL_LINK_MST2_ADDR : IMX_SC_C_PXL_LINK_MST1_ADDR;
return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, val);
}
int dpu_pxlink_set_mst_enable(struct dpu_soc *dpu, int disp_id, bool enable)
{
u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
u8 ctrl = disp_id ?
		IMX_SC_C_PXL_LINK_MST2_ENB : IMX_SC_C_PXL_LINK_MST1_ENB;
return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
}
int dpu_pxlink_set_mst_valid(struct dpu_soc *dpu, int disp_id, bool enable)
{
u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
u8 ctrl = disp_id ?
IMX_SC_C_PXL_LINK_MST2_VLD : IMX_SC_C_PXL_LINK_MST1_VLD;
return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
}
int dpu_pxlink_set_sync_ctrl(struct dpu_soc *dpu, int disp_id, bool enable)
{
u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
u8 ctrl = disp_id ? IMX_SC_C_SYNC_CTRL1 : IMX_SC_C_SYNC_CTRL0;
return dpu_sc_misc_set_ctrl(dpu, rsc, ctrl, enable);
}
int dpu_pxlink_set_dc_sync_mode(struct dpu_soc *dpu, bool enable)
{
u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
return dpu_sc_misc_set_ctrl(dpu, rsc, IMX_SC_C_MODE, enable);
}
/* KACHUNK_CNT is needed for blit engine */
int dpu_sc_misc_set_kachunk_cnt(struct dpu_soc *dpu, u32 cnt)
{
u32 rsc = dpu->id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0;
return dpu_sc_misc_set_ctrl(dpu, rsc, IMX_SC_C_KACHUNK_CNT, cnt);
}
int dpu_sc_misc_init(struct dpu_soc *dpu)
{
int disp_id, ret = 0;
for (disp_id = 0; disp_id < 2; disp_id++) {
ret |= dpu_pxlink_set_mst_addr(dpu, disp_id, 0);
ret |= dpu_pxlink_set_mst_enable(dpu, disp_id, false);
ret |= dpu_pxlink_set_mst_valid(dpu, disp_id, false);
ret |= dpu_pxlink_set_sync_ctrl(dpu, disp_id, false);
}
ret |= dpu_sc_misc_set_kachunk_cnt(dpu, 32);
return ret;
}


@ -0,0 +1,157 @@
/*
* Copyright 2018-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include "dpu-prv.h"
#define PIXENGCFG_STATIC 0x8
#define DIV(n) (((n) & 0xFF) << 16)
#define DIV_RESET 0x80
struct dpu_store {
void __iomem *pec_base;
void __iomem *base;
struct mutex mutex;
int id;
bool inuse;
struct dpu_soc *dpu;
};
static inline u32 dpu_pec_st_read(struct dpu_store *st, unsigned int offset)
{
return readl(st->pec_base + offset);
}
static inline void dpu_pec_st_write(struct dpu_store *st,
unsigned int offset, u32 value)
{
writel(value, st->pec_base + offset);
}
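/* Toggle the sync mode fixup bit (bit 16) in the store unit's PIXENGCFG_STATIC. */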
void store_pixengcfg_syncmode_fixup(struct dpu_store *st, bool enable)
{
struct dpu_soc *dpu;
u32 val;
if (!st)
return;
dpu = st->dpu;
mutex_lock(&st->mutex);
val = dpu_pec_st_read(st, PIXENGCFG_STATIC);
if (enable)
val |= BIT(16);
else
val &= ~BIT(16);
dpu_pec_st_write(st, PIXENGCFG_STATIC, val);
mutex_unlock(&st->mutex);
}
EXPORT_SYMBOL_GPL(store_pixengcfg_syncmode_fixup);
struct dpu_store *dpu_st_get(struct dpu_soc *dpu, int id)
{
struct dpu_store *st;
int i;
for (i = 0; i < ARRAY_SIZE(st_ids); i++)
if (st_ids[i] == id)
break;
if (i == ARRAY_SIZE(st_ids))
return ERR_PTR(-EINVAL);
st = dpu->st_priv[i];
mutex_lock(&st->mutex);
if (st->inuse) {
mutex_unlock(&st->mutex);
return ERR_PTR(-EBUSY);
}
st->inuse = true;
mutex_unlock(&st->mutex);
return st;
}
EXPORT_SYMBOL_GPL(dpu_st_get);
void dpu_st_put(struct dpu_store *st)
{
mutex_lock(&st->mutex);
st->inuse = false;
mutex_unlock(&st->mutex);
}
EXPORT_SYMBOL_GPL(dpu_st_put);
void _dpu_st_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_store *st;
int i;
for (i = 0; i < ARRAY_SIZE(st_ids); i++)
if (st_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(st_ids)))
return;
st = dpu->st_priv[i];
dpu_pec_st_write(st, PIXENGCFG_STATIC, SHDEN | DIV(DIV_RESET));
}
int dpu_st_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_store *st;
int i;
st = devm_kzalloc(dpu->dev, sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(st_ids); i++)
if (st_ids[i] == id)
break;
if (i == ARRAY_SIZE(st_ids))
return -EINVAL;
dpu->st_priv[i] = st;
st->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_32);
if (!st->pec_base)
return -ENOMEM;
st->base = devm_ioremap(dpu->dev, base, SZ_256);
if (!st->base)
return -ENOMEM;
st->dpu = dpu;
st->id = id;
mutex_init(&st->mutex);
_dpu_st_init(dpu, id);
return 0;
}


@ -0,0 +1,329 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include <video/imx8-pc.h>
#include "dpu-prv.h"
#define SSQCNTS 0
#define SSQCYCLE 0x8
#define SWRESET 0xC
#define TCON_CTRL 0x10
#define BYPASS BIT(3)
#define RSDSINVCTRL 0x14
#define MAPBIT3_0 0x18
#define MAPBIT7_4 0x1C
#define MAPBIT11_8 0x20
#define MAPBIT15_12 0x24
#define MAPBIT19_16 0x28
#define MAPBIT23_20 0x2C
#define MAPBIT27_24 0x30
#define MAPBIT31_28 0x34
#define MAPBIT34_32 0x38
#define MAPBIT3_0_DUAL 0x3C
#define MAPBIT7_4_DUAL 0x40
#define MAPBIT11_8_DUAL 0x44
#define MAPBIT15_12_DUAL 0x48
#define MAPBIT19_16_DUAL 0x4C
#define MAPBIT23_20_DUAL 0x50
#define MAPBIT27_24_DUAL 0x54
#define MAPBIT31_28_DUAL 0x58
#define MAPBIT34_32_DUAL 0x5C
#define SPGPOSON(n) (0x60 + (n) * 16)
#define X(n) (((n) & 0x7FFF) << 16)
#define Y(n) ((n) & 0x7FFF)
#define SPGMASKON(n) (0x64 + (n) * 16)
#define SPGPOSOFF(n) (0x68 + (n) * 16)
#define SPGMASKOFF(n) (0x6C + (n) * 16)
#define SMXSIGS(n) (0x120 + (n) * 8)
#define SMXFCTTABLE(n) (0x124 + (n) * 8)
#define RESET_OVER_UNDERFLOW 0x180
#define DUAL_DEBUG 0x184
struct dpu_tcon {
void __iomem *base;
struct mutex mutex;
int id;
bool inuse;
struct dpu_soc *dpu;
struct pc *pc;
};
static inline u32 dpu_tcon_read(struct dpu_tcon *tcon, unsigned int offset)
{
return readl(tcon->base + offset);
}
static inline void dpu_tcon_write(struct dpu_tcon *tcon,
unsigned int offset, u32 value)
{
writel(value, tcon->base + offset);
}
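/*
 * Map TCON output bits to the requested display bus format by programming
 * the MAPBIT registers.
 */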
int tcon_set_fmt(struct dpu_tcon *tcon, u32 bus_format)
{
switch (bus_format) {
case MEDIA_BUS_FMT_RGB888_1X24:
dpu_tcon_write(tcon, MAPBIT3_0, 0x19181716);
dpu_tcon_write(tcon, MAPBIT7_4, 0x1d1c1b1a);
dpu_tcon_write(tcon, MAPBIT11_8, 0x0f0e0d0c);
dpu_tcon_write(tcon, MAPBIT15_12, 0x13121110);
dpu_tcon_write(tcon, MAPBIT19_16, 0x05040302);
dpu_tcon_write(tcon, MAPBIT23_20, 0x09080706);
break;
case MEDIA_BUS_FMT_RGB101010_1X30:
case MEDIA_BUS_FMT_RGB888_1X30_PADLO:
case MEDIA_BUS_FMT_RGB666_1X30_PADLO:
dpu_tcon_write(tcon, MAPBIT3_0, 0x17161514);
dpu_tcon_write(tcon, MAPBIT7_4, 0x1b1a1918);
dpu_tcon_write(tcon, MAPBIT11_8, 0x0b0a1d1c);
dpu_tcon_write(tcon, MAPBIT15_12, 0x0f0e0d0c);
dpu_tcon_write(tcon, MAPBIT19_16, 0x13121110);
dpu_tcon_write(tcon, MAPBIT23_20, 0x03020100);
dpu_tcon_write(tcon, MAPBIT27_24, 0x07060504);
dpu_tcon_write(tcon, MAPBIT31_28, 0x00000908);
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(tcon_set_fmt);
/* This function is used to work around TKT320590, which is related to DPR/PRG. */
void tcon_set_operation_mode(struct dpu_tcon *tcon)
{
u32 val;
val = dpu_tcon_read(tcon, TCON_CTRL);
val &= ~BYPASS;
dpu_tcon_write(tcon, TCON_CTRL, val);
}
EXPORT_SYMBOL_GPL(tcon_set_operation_mode);
void tcon_cfg_videomode(struct dpu_tcon *tcon,
struct drm_display_mode *m, bool side_by_side)
{
u32 val;
int hdisplay, hsync_start, hsync_end;
int vdisplay, vsync_start, vsync_end;
int y;
hdisplay = m->hdisplay;
vdisplay = m->vdisplay;
hsync_start = m->hsync_start;
vsync_start = m->vsync_start;
hsync_end = m->hsync_end;
vsync_end = m->vsync_end;
if (side_by_side) {
hdisplay /= 2;
hsync_start /= 2;
hsync_end /= 2;
}
/*
* TKT320590:
 * Turn TCON into operation mode later, after the first dumb frame is
 * generated by the DPU. This allows DPR/PRG to evade that frame.
*/
val = dpu_tcon_read(tcon, TCON_CTRL);
val |= BYPASS;
dpu_tcon_write(tcon, TCON_CTRL, val);
/* dsp_control[0]: hsync */
dpu_tcon_write(tcon, SPGPOSON(0), X(hsync_start));
dpu_tcon_write(tcon, SPGMASKON(0), 0xffff);
dpu_tcon_write(tcon, SPGPOSOFF(0), X(hsync_end));
dpu_tcon_write(tcon, SPGMASKOFF(0), 0xffff);
dpu_tcon_write(tcon, SMXSIGS(0), 0x2);
dpu_tcon_write(tcon, SMXFCTTABLE(0), 0x1);
/* dsp_control[1]: vsync */
dpu_tcon_write(tcon, SPGPOSON(1), X(hsync_start) | Y(vsync_start - 1));
dpu_tcon_write(tcon, SPGMASKON(1), 0x0);
dpu_tcon_write(tcon, SPGPOSOFF(1), X(hsync_start) | Y(vsync_end - 1));
dpu_tcon_write(tcon, SPGMASKOFF(1), 0x0);
dpu_tcon_write(tcon, SMXSIGS(1), 0x3);
dpu_tcon_write(tcon, SMXFCTTABLE(1), 0x1);
/* dsp_control[2]: data enable */
/* horizontal */
dpu_tcon_write(tcon, SPGPOSON(2), 0x0);
dpu_tcon_write(tcon, SPGMASKON(2), 0xffff);
dpu_tcon_write(tcon, SPGPOSOFF(2), X(hdisplay));
dpu_tcon_write(tcon, SPGMASKOFF(2), 0xffff);
/* vertical */
dpu_tcon_write(tcon, SPGPOSON(3), 0x0);
dpu_tcon_write(tcon, SPGMASKON(3), 0x7fff0000);
dpu_tcon_write(tcon, SPGPOSOFF(3), Y(vdisplay));
dpu_tcon_write(tcon, SPGMASKOFF(3), 0x7fff0000);
dpu_tcon_write(tcon, SMXSIGS(2), 0x2c);
dpu_tcon_write(tcon, SMXFCTTABLE(2), 0x8);
	/* dsp_control[3]: kachunk */
	y = vdisplay + 1;
	/*
	 * If the sync mode fixup is present, the kachunk signal from the
	 * slave TCON should come one line later than the one from the
	 * master TCON.
	 */
if (side_by_side && tcon_is_slave(tcon))
y++;
dpu_tcon_write(tcon, SPGPOSON(4), X(0x0) | Y(y));
dpu_tcon_write(tcon, SPGMASKON(4), 0x0);
dpu_tcon_write(tcon, SPGPOSOFF(4), X(0x20) | Y(y));
dpu_tcon_write(tcon, SPGMASKOFF(4), 0x0);
dpu_tcon_write(tcon, SMXSIGS(3), 0x6);
dpu_tcon_write(tcon, SMXFCTTABLE(3), 0x2);
}
EXPORT_SYMBOL_GPL(tcon_cfg_videomode);
bool tcon_is_master(struct dpu_tcon *tcon)
{
const struct dpu_data *data = tcon->dpu->data;
return tcon->id == data->master_stream_id;
}
EXPORT_SYMBOL_GPL(tcon_is_master);
bool tcon_is_slave(struct dpu_tcon *tcon)
{
return !tcon_is_master(tcon);
}
EXPORT_SYMBOL_GPL(tcon_is_slave);
void tcon_configure_pc(struct dpu_tcon *tcon, unsigned int di,
unsigned int frame_width, u32 mode, u32 format)
{
if (WARN_ON(!tcon || !tcon->pc))
return;
pc_configure(tcon->pc, di, frame_width, mode, format);
}
EXPORT_SYMBOL_GPL(tcon_configure_pc);
void tcon_enable_pc(struct dpu_tcon *tcon)
{
if (WARN_ON(!tcon || !tcon->pc))
return;
pc_enable(tcon->pc);
}
EXPORT_SYMBOL_GPL(tcon_enable_pc);
void tcon_disable_pc(struct dpu_tcon *tcon)
{
if (WARN_ON(!tcon || !tcon->pc))
return;
pc_disable(tcon->pc);
}
EXPORT_SYMBOL_GPL(tcon_disable_pc);
struct dpu_tcon *dpu_tcon_get(struct dpu_soc *dpu, int id)
{
struct dpu_tcon *tcon;
int i;
for (i = 0; i < ARRAY_SIZE(tcon_ids); i++)
if (tcon_ids[i] == id)
break;
if (i == ARRAY_SIZE(tcon_ids))
return ERR_PTR(-EINVAL);
tcon = dpu->tcon_priv[i];
mutex_lock(&tcon->mutex);
if (tcon->inuse) {
mutex_unlock(&tcon->mutex);
return ERR_PTR(-EBUSY);
}
tcon->inuse = true;
mutex_unlock(&tcon->mutex);
return tcon;
}
EXPORT_SYMBOL_GPL(dpu_tcon_get);
void dpu_tcon_put(struct dpu_tcon *tcon)
{
mutex_lock(&tcon->mutex);
tcon->inuse = false;
mutex_unlock(&tcon->mutex);
}
EXPORT_SYMBOL_GPL(dpu_tcon_put);
struct dpu_tcon *dpu_aux_tcon_peek(struct dpu_tcon *tcon)
{
return tcon->dpu->tcon_priv[tcon->id ^ 1];
}
EXPORT_SYMBOL_GPL(dpu_aux_tcon_peek);
void _dpu_tcon_init(struct dpu_soc *dpu, unsigned int id)
{
}
int dpu_tcon_init(struct dpu_soc *dpu, unsigned int id,
unsigned long unused, unsigned long base)
{
struct dpu_tcon *tcon;
tcon = devm_kzalloc(dpu->dev, sizeof(*tcon), GFP_KERNEL);
if (!tcon)
return -ENOMEM;
dpu->tcon_priv[id] = tcon;
tcon->base = devm_ioremap(dpu->dev, base, SZ_512);
if (!tcon->base)
return -ENOMEM;
	tcon->dpu = dpu;
	tcon->id = id;
	mutex_init(&tcon->mutex);
return 0;
}
void tcon_get_pc(struct dpu_tcon *tcon, void *data)
{
if (WARN_ON(!tcon))
return;
tcon->pc = data;
}


@ -0,0 +1,438 @@
/*
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <video/dpu.h>
#include "dpu-prv.h"
#define PIXENGCFG_DYNAMIC 0x8
#define PIXENGCFG_DYNAMIC_SRC_SEL_MASK 0x3F
#define SETUP1 0xC
#define SCALE_FACTOR_MASK 0xFFFFF
#define SCALE_FACTOR(n) ((n) & 0xFFFFF)
#define SETUP2 0x10
#define SETUP3 0x14
#define SETUP4 0x18
#define SETUP5 0x1C
#define PHASE_OFFSET_MASK 0x1FFFFF
#define PHASE_OFFSET(n) ((n) & 0x1FFFFF)
#define CONTROL 0x20
#define OUTPUT_SIZE_MASK 0x3FFF0000
#define OUTPUT_SIZE(n) ((((n) - 1) << 16) & OUTPUT_SIZE_MASK)
#define FIELD_MODE 0x3000
#define FILTER_MODE 0x100
#define SCALE_MODE 0x10
#define MODE 0x1
static const vs_src_sel_t src_sels[3][6] = {
{
VS_SRC_SEL__DISABLE,
VS_SRC_SEL__FETCHDECODE0,
VS_SRC_SEL__MATRIX4,
VS_SRC_SEL__HSCALER4,
}, {
VS_SRC_SEL__DISABLE,
VS_SRC_SEL__FETCHDECODE1,
VS_SRC_SEL__MATRIX5,
VS_SRC_SEL__HSCALER5,
}, {
VS_SRC_SEL__DISABLE,
VS_SRC_SEL__MATRIX9,
VS_SRC_SEL__HSCALER9,
},
};
struct dpu_vscaler {
void __iomem *pec_base;
void __iomem *base;
struct mutex mutex;
int id;
bool inuse;
struct dpu_soc *dpu;
/* see DPU_PLANE_SRC_xxx */
unsigned int stream_id;
};
static inline u32 dpu_pec_vs_read(struct dpu_vscaler *vs,
unsigned int offset)
{
return readl(vs->pec_base + offset);
}
static inline void dpu_pec_vs_write(struct dpu_vscaler *vs,
unsigned int offset, u32 value)
{
writel(value, vs->pec_base + offset);
}
static inline u32 dpu_vs_read(struct dpu_vscaler *vs, unsigned int offset)
{
return readl(vs->base + offset);
}
static inline void dpu_vs_write(struct dpu_vscaler *vs,
unsigned int offset, u32 value)
{
writel(value, vs->base + offset);
}
int vscaler_pixengcfg_dynamic_src_sel(struct dpu_vscaler *vs, vs_src_sel_t src)
{
struct dpu_soc *dpu = vs->dpu;
const unsigned int vs_id_array[] = {4, 5, 9};
int i, j;
u32 val;
for (i = 0; i < ARRAY_SIZE(vs_id_array); i++)
if (vs_id_array[i] == vs->id)
break;
if (WARN_ON(i == ARRAY_SIZE(vs_id_array)))
return -EINVAL;
mutex_lock(&vs->mutex);
for (j = 0; j < ARRAY_SIZE(src_sels[0]); j++) {
if (src_sels[i][j] == src) {
val = dpu_pec_vs_read(vs, PIXENGCFG_DYNAMIC);
val &= ~PIXENGCFG_DYNAMIC_SRC_SEL_MASK;
val |= src;
dpu_pec_vs_write(vs, PIXENGCFG_DYNAMIC, val);
mutex_unlock(&vs->mutex);
return 0;
}
}
mutex_unlock(&vs->mutex);
dev_err(dpu->dev, "Invalid source for VScaler%d\n", vs->id);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(vscaler_pixengcfg_dynamic_src_sel);
void vscaler_pixengcfg_clken(struct dpu_vscaler *vs, pixengcfg_clken_t clken)
{
u32 val;
mutex_lock(&vs->mutex);
val = dpu_pec_vs_read(vs, PIXENGCFG_DYNAMIC);
val &= ~CLKEN_MASK;
val |= clken << CLKEN_MASK_SHIFT;
dpu_pec_vs_write(vs, PIXENGCFG_DYNAMIC, val);
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_pixengcfg_clken);
void vscaler_shden(struct dpu_vscaler *vs, bool enable)
{
u32 val;
mutex_lock(&vs->mutex);
val = dpu_vs_read(vs, STATICCONTROL);
if (enable)
val |= SHDEN;
else
val &= ~SHDEN;
dpu_vs_write(vs, STATICCONTROL, val);
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_shden);
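/*
 * Program the vertical scale factor; 0x80000 encodes a 1:1 ratio. When
 * deinterlacing, the target height is doubled before the factor is
 * computed.
 */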
void vscaler_setup1(struct dpu_vscaler *vs, u32 src, u32 dst, bool deinterlace)
{
struct dpu_soc *dpu = vs->dpu;
u32 scale_factor;
u64 tmp64;
if (deinterlace)
dst *= 2;
if (src == dst) {
scale_factor = 0x80000;
} else {
if (src > dst) {
tmp64 = (u64)((u64)dst * 0x80000);
do_div(tmp64, src);
} else {
tmp64 = (u64)((u64)src * 0x80000);
do_div(tmp64, dst);
}
scale_factor = (u32)tmp64;
}
WARN_ON(scale_factor > 0x80000);
mutex_lock(&vs->mutex);
dpu_vs_write(vs, SETUP1, SCALE_FACTOR(scale_factor));
mutex_unlock(&vs->mutex);
dev_dbg(dpu->dev, "Vscaler%d scale factor 0x%08x\n",
vs->id, scale_factor);
}
EXPORT_SYMBOL_GPL(vscaler_setup1);
void vscaler_setup2(struct dpu_vscaler *vs, bool deinterlace)
{
/* 0x20000: +0.25 phase offset for deinterlace */
u32 phase_offset = deinterlace ? 0x20000 : 0;
mutex_lock(&vs->mutex);
dpu_vs_write(vs, SETUP2, PHASE_OFFSET(phase_offset));
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_setup2);
void vscaler_setup3(struct dpu_vscaler *vs, bool deinterlace)
{
/* 0x1e0000: -0.25 phase offset for deinterlace */
u32 phase_offset = deinterlace ? 0x1e0000 : 0;
mutex_lock(&vs->mutex);
dpu_vs_write(vs, SETUP3, PHASE_OFFSET(phase_offset));
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_setup3);
void vscaler_setup4(struct dpu_vscaler *vs, u32 phase_offset)
{
mutex_lock(&vs->mutex);
dpu_vs_write(vs, SETUP4, PHASE_OFFSET(phase_offset));
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_setup4);
void vscaler_setup5(struct dpu_vscaler *vs, u32 phase_offset)
{
mutex_lock(&vs->mutex);
dpu_vs_write(vs, SETUP5, PHASE_OFFSET(phase_offset));
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_setup5);
void vscaler_output_size(struct dpu_vscaler *vs, u32 line_num)
{
u32 val;
mutex_lock(&vs->mutex);
val = dpu_vs_read(vs, CONTROL);
val &= ~OUTPUT_SIZE_MASK;
val |= OUTPUT_SIZE(line_num);
dpu_vs_write(vs, CONTROL, val);
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_output_size);
void vscaler_field_mode(struct dpu_vscaler *vs, scaler_field_mode_t m)
{
u32 val;
mutex_lock(&vs->mutex);
val = dpu_vs_read(vs, CONTROL);
val &= ~FIELD_MODE;
val |= m;
dpu_vs_write(vs, CONTROL, val);
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_field_mode);
void vscaler_filter_mode(struct dpu_vscaler *vs, scaler_filter_mode_t m)
{
u32 val;
mutex_lock(&vs->mutex);
val = dpu_vs_read(vs, CONTROL);
val &= ~FILTER_MODE;
val |= m;
dpu_vs_write(vs, CONTROL, val);
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_filter_mode);
void vscaler_scale_mode(struct dpu_vscaler *vs, scaler_scale_mode_t m)
{
u32 val;
mutex_lock(&vs->mutex);
val = dpu_vs_read(vs, CONTROL);
val &= ~SCALE_MODE;
val |= m;
dpu_vs_write(vs, CONTROL, val);
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_scale_mode);
void vscaler_mode(struct dpu_vscaler *vs, scaler_mode_t m)
{
u32 val;
mutex_lock(&vs->mutex);
val = dpu_vs_read(vs, CONTROL);
val &= ~MODE;
val |= m;
dpu_vs_write(vs, CONTROL, val);
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(vscaler_mode);
bool vscaler_is_enabled(struct dpu_vscaler *vs)
{
u32 val;
mutex_lock(&vs->mutex);
val = dpu_vs_read(vs, CONTROL);
mutex_unlock(&vs->mutex);
return (val & MODE) == SCALER_ACTIVE;
}
EXPORT_SYMBOL_GPL(vscaler_is_enabled);
dpu_block_id_t vscaler_get_block_id(struct dpu_vscaler *vs)
{
switch (vs->id) {
case 4:
return ID_VSCALER4;
case 5:
return ID_VSCALER5;
case 9:
return ID_VSCALER9;
default:
WARN_ON(1);
}
return ID_NONE;
}
EXPORT_SYMBOL_GPL(vscaler_get_block_id);
unsigned int vscaler_get_stream_id(struct dpu_vscaler *vs)
{
return vs->stream_id;
}
EXPORT_SYMBOL_GPL(vscaler_get_stream_id);
void vscaler_set_stream_id(struct dpu_vscaler *vs, unsigned int id)
{
switch (id) {
case DPU_PLANE_SRC_TO_DISP_STREAM0:
case DPU_PLANE_SRC_TO_DISP_STREAM1:
case DPU_PLANE_SRC_DISABLED:
vs->stream_id = id;
break;
default:
WARN_ON(1);
}
}
EXPORT_SYMBOL_GPL(vscaler_set_stream_id);
struct dpu_vscaler *dpu_vs_get(struct dpu_soc *dpu, int id)
{
struct dpu_vscaler *vs;
int i;
for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
if (vs_ids[i] == id)
break;
if (i == ARRAY_SIZE(vs_ids))
return ERR_PTR(-EINVAL);
vs = dpu->vs_priv[i];
mutex_lock(&vs->mutex);
if (vs->inuse) {
mutex_unlock(&vs->mutex);
return ERR_PTR(-EBUSY);
}
vs->inuse = true;
mutex_unlock(&vs->mutex);
return vs;
}
EXPORT_SYMBOL_GPL(dpu_vs_get);
void dpu_vs_put(struct dpu_vscaler *vs)
{
mutex_lock(&vs->mutex);
vs->inuse = false;
mutex_unlock(&vs->mutex);
}
EXPORT_SYMBOL_GPL(dpu_vs_put);
void _dpu_vs_init(struct dpu_soc *dpu, unsigned int id)
{
struct dpu_vscaler *vs;
int i;
for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
if (vs_ids[i] == id)
break;
if (WARN_ON(i == ARRAY_SIZE(vs_ids)))
return;
vs = dpu->vs_priv[i];
vscaler_shden(vs, true);
vscaler_setup2(vs, false);
vscaler_setup3(vs, false);
vscaler_setup4(vs, 0);
vscaler_setup5(vs, 0);
vscaler_pixengcfg_dynamic_src_sel(vs, VS_SRC_SEL__DISABLE);
}
int dpu_vs_init(struct dpu_soc *dpu, unsigned int id,
unsigned long pec_base, unsigned long base)
{
struct dpu_vscaler *vs;
int i;
vs = devm_kzalloc(dpu->dev, sizeof(*vs), GFP_KERNEL);
if (!vs)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(vs_ids); i++)
if (vs_ids[i] == id)
break;
if (i == ARRAY_SIZE(vs_ids))
return -EINVAL;
dpu->vs_priv[i] = vs;
vs->pec_base = devm_ioremap(dpu->dev, pec_base, SZ_8);
if (!vs->pec_base)
return -ENOMEM;
vs->base = devm_ioremap(dpu->dev, base, SZ_1K);
if (!vs->base)
return -ENOMEM;
vs->dpu = dpu;
vs->id = id;
mutex_init(&vs->mutex);
_dpu_vs_init(dpu, id);
return 0;
}


@ -15,7 +15,7 @@ source "drivers/char/agp/Kconfig"
source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/host1x/Kconfig"
source "drivers/gpu/ipu-v3/Kconfig"
source "drivers/gpu/imx/Kconfig"
source "drivers/gpu/drm/Kconfig"

include/video/dpu.h (new file, 718 lines)

@ -0,0 +1,718 @@
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#ifndef __DRM_DPU_H__
#define __DRM_DPU_H__
#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
#include <video/videomode.h>
struct dpu_soc;
enum dpu_irq {
IRQ_STORE9_SHDLOAD = 0,
IRQ_STORE9_FRAMECOMPLETE = 1,
IRQ_STORE9_SEQCOMPLETE = 2,
IRQ_EXTDST0_SHDLOAD = 3,
IRQ_EXTDST0_FRAMECOMPLETE = 4,
IRQ_EXTDST0_SEQCOMPLETE = 5,
IRQ_EXTDST4_SHDLOAD = 6,
IRQ_EXTDST4_FRAMECOMPLETE = 7,
IRQ_EXTDST4_SEQCOMPLETE = 8,
IRQ_EXTDST1_SHDLOAD = 9,
IRQ_EXTDST1_FRAMECOMPLETE = 10,
IRQ_EXTDST1_SEQCOMPLETE = 11,
IRQ_EXTDST5_SHDLOAD = 12,
IRQ_EXTDST5_FRAMECOMPLETE = 13,
IRQ_EXTDST5_SEQCOMPLETE = 14,
IRQ_DISENGCFG_SHDLOAD0 = 15,
IRQ_DISENGCFG_FRAMECOMPLETE0 = 16,
IRQ_DISENGCFG_SEQCOMPLETE0 = 17,
IRQ_FRAMEGEN0_INT0 = 18,
IRQ_FRAMEGEN0_INT1 = 19,
IRQ_FRAMEGEN0_INT2 = 20,
IRQ_FRAMEGEN0_INT3 = 21,
IRQ_SIG0_SHDLOAD = 22,
IRQ_SIG0_VALID = 23,
IRQ_SIG0_ERROR = 24,
IRQ_DISENGCFG_SHDLOAD1 = 25,
IRQ_DISENGCFG_FRAMECOMPLETE1 = 26,
IRQ_DISENGCFG_SEQCOMPLETE1 = 27,
IRQ_FRAMEGEN1_INT0 = 28,
IRQ_FRAMEGEN1_INT1 = 29,
IRQ_FRAMEGEN1_INT2 = 30,
IRQ_FRAMEGEN1_INT3 = 31,
IRQ_SIG1_SHDLOAD = 32,
IRQ_SIG1_VALID = 33,
IRQ_SIG1_ERROR = 34,
IRQ_RESERVED = 35,
IRQ_CMDSEQ_ERROR = 36,
IRQ_COMCTRL_SW0 = 37,
IRQ_COMCTRL_SW1 = 38,
IRQ_COMCTRL_SW2 = 39,
IRQ_COMCTRL_SW3 = 40,
IRQ_FRAMEGEN0_PRIMSYNC_ON = 41,
IRQ_FRAMEGEN0_PRIMSYNC_OFF = 42,
IRQ_FRAMEGEN0_SECSYNC_ON = 43,
IRQ_FRAMEGEN0_SECSYNC_OFF = 44,
IRQ_FRAMEGEN1_PRIMSYNC_ON = 45,
IRQ_FRAMEGEN1_PRIMSYNC_OFF = 46,
IRQ_FRAMEGEN1_SECSYNC_ON = 47,
IRQ_FRAMEGEN1_SECSYNC_OFF = 48,
};
typedef enum {
ID_NONE = 0x00, /* 0 */
ID_FETCHDECODE9 = 0x01, /* 1 */
ID_FETCHPERSP9 = 0x02, /* 2 */
ID_FETCHECO9 = 0x03, /* 3 */
ID_ROP9 = 0x04, /* 4 */
ID_CLUT9 = 0x05, /* 5 */
ID_MATRIX9 = 0x06, /* 6 */
ID_HSCALER9 = 0x07, /* 7 */
ID_VSCALER9 = 0x08, /* 8 */
ID_FILTER9 = 0x09, /* 9 */
ID_BLITBLEND9 = 0x0A, /* 10 */
ID_CONSTFRAME0 = 0x0C, /* 12 */
ID_CONSTFRAME4 = 0x0E, /* 14 */
ID_CONSTFRAME1 = 0x10, /* 16 */
ID_CONSTFRAME5 = 0x12, /* 18 */
ID_FETCHWARP2 = 0x14, /* 20 */
ID_FETCHECO2 = 0x15, /* 21 */
ID_FETCHDECODE0 = 0x16, /* 22 */
ID_FETCHECO0 = 0x17, /* 23 */
ID_FETCHDECODE1 = 0x18, /* 24 */
ID_FETCHECO1 = 0x19, /* 25 */
ID_FETCHLAYER0 = 0x1a, /* 26 */
ID_MATRIX4 = 0x1B, /* 27 */
ID_HSCALER4 = 0x1C, /* 28 */
ID_VSCALER4 = 0x1D, /* 29 */
ID_MATRIX5 = 0x1E, /* 30 */
ID_HSCALER5 = 0x1F, /* 31 */
ID_VSCALER5 = 0x20, /* 32 */
ID_LAYERBLEND0 = 0x21, /* 33 */
ID_LAYERBLEND1 = 0x22, /* 34 */
ID_LAYERBLEND2 = 0x23, /* 35 */
ID_LAYERBLEND3 = 0x24, /* 36 */
} dpu_block_id_t;
typedef enum {
ED_SRC_DISABLE = ID_NONE,
ED_SRC_BLITBLEND9 = ID_BLITBLEND9,
ED_SRC_CONSTFRAME0 = ID_CONSTFRAME0,
ED_SRC_CONSTFRAME1 = ID_CONSTFRAME1,
ED_SRC_CONSTFRAME4 = ID_CONSTFRAME4,
ED_SRC_CONSTFRAME5 = ID_CONSTFRAME5,
ED_SRC_MATRIX4 = ID_MATRIX4,
ED_SRC_HSCALER4 = ID_HSCALER4,
ED_SRC_VSCALER4 = ID_VSCALER4,
/* content stream(extdst 0/1) only */
ED_SRC_MATRIX5 = ID_MATRIX5,
ED_SRC_HSCALER5 = ID_HSCALER5,
ED_SRC_VSCALER5 = ID_VSCALER5,
/* content stream(extdst 0/1) only */
ED_SRC_LAYERBLEND3 = ID_LAYERBLEND3,
ED_SRC_LAYERBLEND2 = ID_LAYERBLEND2,
ED_SRC_LAYERBLEND1 = ID_LAYERBLEND1,
ED_SRC_LAYERBLEND0 = ID_LAYERBLEND0,
} extdst_src_sel_t;
typedef enum {
SINGLE, /* Reconfig pipeline after explicit trigger */
AUTO, /* Reconfig pipeline after every kick when idle */
} ed_sync_mode_t;
typedef enum {
PSTATUS_EMPTY,
PSTATUS_RUNNING,
PSTATUS_RUNNING_RETRIGGERED,
PSTATUS_RESERVED
} ed_pipeline_status_t;
typedef enum {
SOFTWARE = 0, /* kick generation by KICK field only */
EXTERNAL = BIT(8), /* kick signal from external allowed */
} ed_kick_mode_t;
typedef enum {
FD_SRC_DISABLE = ID_NONE,
FD_SRC_FETCHECO0 = ID_FETCHECO0,
FD_SRC_FETCHECO1 = ID_FETCHECO1,
FD_SRC_FETCHECO2 = ID_FETCHECO2,
FD_SRC_FETCHDECODE0 = ID_FETCHDECODE0,
FD_SRC_FETCHDECODE1 = ID_FETCHDECODE1,
FD_SRC_FETCHWARP2 = ID_FETCHWARP2,
} fd_dynamic_src_sel_t;
typedef enum {
/* RL and RLAD decoder */
FETCHTYPE__DECODE,
/* fractional plane(8 layers) */
FETCHTYPE__LAYER,
/* arbitrary warping and fractional plane(8 layers) */
FETCHTYPE__WARP,
/* minimum feature set for alpha, chroma and coordinate planes */
FETCHTYPE__ECO,
/* affine, perspective and arbitrary warping */
FETCHTYPE__PERSP,
/* affine and arbitrary warping */
FETCHTYPE__ROT,
/* RL and RLAD decoder, reduced feature set */
FETCHTYPE__DECODEL,
/* fractional plane(8 layers), reduced feature set */
FETCHTYPE__LAYERL,
/* affine and arbitrary warping, reduced feature set */
FETCHTYPE__ROTL,
} fetchtype_t;
typedef enum {
/* No side-by-side synchronization. */
FGSYNCMODE__OFF = 0,
/* Framegen is master. */
FGSYNCMODE__MASTER = 1 << 1,
/* Runs in cyclic synchronization mode. */
FGSYNCMODE__SLAVE_CYC = 2 << 1,
/* Runs in one time synchronization mode. */
FGSYNCMODE__SLAVE_ONCE = 3 << 1,
} fgsyncmode_t;
typedef enum {
FGDM__BLACK,
/* Constant Color Background is shown. */
FGDM__CONSTCOL,
FGDM__PRIM,
FGDM__SEC,
FGDM__PRIM_ON_TOP,
FGDM__SEC_ON_TOP,
/* White color background with test pattern is shown. */
FGDM__TEST,
} fgdm_t;
typedef enum {
HS_SRC_SEL__DISABLE = ID_NONE,
HS_SRC_SEL__MATRIX9 = ID_MATRIX9,
HS_SRC_SEL__VSCALER9 = ID_VSCALER9,
HS_SRC_SEL__FILTER9 = ID_FILTER9,
HS_SRC_SEL__FETCHDECODE0 = ID_FETCHDECODE0,
HS_SRC_SEL__FETCHDECODE1 = ID_FETCHDECODE1,
HS_SRC_SEL__MATRIX4 = ID_MATRIX4,
HS_SRC_SEL__VSCALER4 = ID_VSCALER4,
HS_SRC_SEL__MATRIX5 = ID_MATRIX5,
HS_SRC_SEL__VSCALER5 = ID_VSCALER5,
} hs_src_sel_t;
typedef enum {
/* common options */
LB_PRIM_SEL__DISABLE = ID_NONE,
LB_PRIM_SEL__BLITBLEND9 = ID_BLITBLEND9,
LB_PRIM_SEL__CONSTFRAME0 = ID_CONSTFRAME0,
LB_PRIM_SEL__CONSTFRAME1 = ID_CONSTFRAME1,
LB_PRIM_SEL__CONSTFRAME4 = ID_CONSTFRAME4,
LB_PRIM_SEL__CONSTFRAME5 = ID_CONSTFRAME5,
LB_PRIM_SEL__MATRIX4 = ID_MATRIX4,
LB_PRIM_SEL__HSCALER4 = ID_HSCALER4,
LB_PRIM_SEL__VSCALER4 = ID_VSCALER4,
LB_PRIM_SEL__MATRIX5 = ID_MATRIX5,
LB_PRIM_SEL__HSCALER5 = ID_HSCALER5,
LB_PRIM_SEL__VSCALER5 = ID_VSCALER5,
/*
 * special options:
 * layerblend(n) has n special options,
 * layerblend0 through layerblend(n - 1);
 * e.g., layerblend3 has the 3 special
 * options layerblend0/1/2.
 */
LB_PRIM_SEL__LAYERBLEND3 = ID_LAYERBLEND3,
LB_PRIM_SEL__LAYERBLEND2 = ID_LAYERBLEND2,
LB_PRIM_SEL__LAYERBLEND1 = ID_LAYERBLEND1,
LB_PRIM_SEL__LAYERBLEND0 = ID_LAYERBLEND0,
} lb_prim_sel_t;
typedef enum {
LB_SEC_SEL__DISABLE = ID_NONE,
LB_SEC_SEL__FETCHWARP2 = ID_FETCHWARP2,
LB_SEC_SEL__FETCHDECODE0 = ID_FETCHDECODE0,
LB_SEC_SEL__FETCHDECODE1 = ID_FETCHDECODE1,
LB_SEC_SEL__MATRIX4 = ID_MATRIX4,
LB_SEC_SEL__HSCALER4 = ID_HSCALER4,
LB_SEC_SEL__VSCALER4 = ID_VSCALER4,
LB_SEC_SEL__MATRIX5 = ID_MATRIX5,
LB_SEC_SEL__HSCALER5 = ID_HSCALER5,
LB_SEC_SEL__VSCALER5 = ID_VSCALER5,
LB_SEC_SEL__FETCHLAYER0 = ID_FETCHLAYER0,
} lb_sec_sel_t;
typedef enum {
PRIMARY, /* background plane */
SECONDARY, /* foreground plane */
BOTH,
} lb_shadow_sel_t;
typedef enum {
LB_NEUTRAL, /* Output is the same as the primary input. */
LB_BLEND,
} lb_mode_t;
typedef enum {
/* Constant 0 indicates frame or top field. */
SCALER_ALWAYS0 = 0x0,
/* Constant 1 indicates bottom field. */
SCALER_ALWAYS1 = 0x1 << 12,
/* Output field polarity is taken from input field polarity. */
SCALER_INPUT = 0x2 << 12,
/* Output field polarity toggles, starting with 0 after reset. */
SCALER_TOGGLE = 0x3 << 12,
} scaler_field_mode_t;
typedef enum {
/* point sampling (nearest neighbor) */
SCALER_NEAREST = 0x0,
/* box filter */
SCALER_LINEAR = 0x100,
} scaler_filter_mode_t;
typedef enum {
SCALER_DOWNSCALE = 0x0,
SCALER_UPSCALE = 0x10,
} scaler_scale_mode_t;
typedef enum {
/* Pixels bypass the scaler; all other settings are ignored. */
SCALER_NEUTRAL = 0x0,
/* Scaler is active. */
SCALER_ACTIVE = 0x1,
} scaler_mode_t;
typedef enum {
VS_SRC_SEL__DISABLE = ID_NONE,
VS_SRC_SEL__MATRIX9 = ID_MATRIX9,
VS_SRC_SEL__HSCALER9 = ID_HSCALER9,
VS_SRC_SEL__FETCHDECODE0 = ID_FETCHDECODE0,
VS_SRC_SEL__FETCHDECODE1 = ID_FETCHDECODE1,
VS_SRC_SEL__MATRIX4 = ID_MATRIX4,
VS_SRC_SEL__HSCALER4 = ID_HSCALER4,
VS_SRC_SEL__MATRIX5 = ID_MATRIX5,
VS_SRC_SEL__HSCALER5 = ID_HSCALER5,
} vs_src_sel_t;
#define CLKEN_MASK (0x3 << 24)
#define CLKEN_MASK_SHIFT 24
typedef enum {
CLKEN__DISABLE = 0x0,
CLKEN__AUTOMATIC = 0x1,
CLKEN__FULL = 0x3,
} pixengcfg_clken_t;
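/*
 * Illustrative sketch (not part of the original header): how the CLKEN
 * field of a pixengcfg register could be composed from the mask and shift
 * above. The readl()/writel() accessors and passing the register address in
 * directly are assumptions for illustration only.
 */
static void example_pixengcfg_set_clken(void __iomem *reg,
                                        pixengcfg_clken_t clken)
{
        u32 val = readl(reg);

        val &= ~CLKEN_MASK;                     /* clear the 2-bit CLKEN field */
        val |= (u32)clken << CLKEN_MASK_SHIFT;  /* install the requested mode */
        writel(val, reg);
}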
/* fetch unit types */
enum {
FU_T_NA,
FU_T_FD,
FU_T_FE,
FU_T_FL,
FU_T_FW,
};
struct dpu_fetchunit;
struct dpu_fetchunit_ops {
void (*set_burstlength)(struct dpu_fetchunit *fu,
unsigned int x_offset, unsigned int mt_w,
int bpp, dma_addr_t baddr, bool use_prefetch);
void (*set_baseaddress)(struct dpu_fetchunit *fu, unsigned int width,
unsigned int x_offset, unsigned int y_offset,
unsigned int mt_w, unsigned int mt_h,
int bpp, dma_addr_t baddr);
void (*set_src_bpp)(struct dpu_fetchunit *fu, int bpp);
void (*set_src_stride)(struct dpu_fetchunit *fu,
unsigned int width, unsigned int x_offset,
unsigned int mt_w, int bpp, unsigned int stride,
dma_addr_t baddr, bool use_prefetch);
void (*set_src_buf_dimensions)(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h, u32 fmt,
bool deinterlace);
void (*set_fmt)(struct dpu_fetchunit *fu, u32 fmt,
enum drm_color_encoding color_encoding,
enum drm_color_range color_range,
bool deinterlace);
void (*set_pixel_blend_mode)(struct dpu_fetchunit *fu,
unsigned int pixel_blend_mode, u16 alpha,
u32 fb_format);
void (*enable_src_buf)(struct dpu_fetchunit *fu);
void (*disable_src_buf)(struct dpu_fetchunit *fu);
bool (*is_enabled)(struct dpu_fetchunit *fu);
void (*set_framedimensions)(struct dpu_fetchunit *fu,
unsigned int w, unsigned int h,
bool deinterlace);
void (*set_controltrigger)(struct dpu_fetchunit *fu);
unsigned int (*get_stream_id)(struct dpu_fetchunit *fu);
void (*set_stream_id)(struct dpu_fetchunit *fu, unsigned int id);
void (*pin_off)(struct dpu_fetchunit *fu);
void (*unpin_off)(struct dpu_fetchunit *fu);
bool (*is_pinned_off)(struct dpu_fetchunit *fu);
};
struct dpu_fetchunit {
void __iomem *pec_base;
void __iomem *base;
char *name;
struct mutex mutex;
int id;
int sub_id; /* for fractional fetch units */
int type;
bool inuse;
struct dpu_soc *dpu;
/* see DPU_PLANE_SRC_xxx */
unsigned int stream_id;
bool pin_off;
struct dprc *dprc;
const struct dpu_fetchunit_ops *ops;
};
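/*
 * Illustrative sketch (not part of the original header): driving a fetch
 * unit purely through its ops table, as a plane update might. The call
 * order and the zero tile/offset arguments are assumptions; the real plane
 * code may sequence these differently.
 */
static void example_fetchunit_show_buffer(struct dpu_fetchunit *fu,
                                          unsigned int w, unsigned int h,
                                          u32 fmt, int bpp,
                                          unsigned int stride,
                                          dma_addr_t baddr)
{
        const struct dpu_fetchunit_ops *ops = fu->ops;

        ops->set_burstlength(fu, 0, 0, bpp, baddr, false);
        ops->set_src_bpp(fu, bpp);
        ops->set_src_stride(fu, w, 0, 0, bpp, stride, baddr, false);
        ops->set_src_buf_dimensions(fu, w, h, fmt, false);
        ops->set_framedimensions(fu, w, h, false);
        ops->set_baseaddress(fu, w, 0, 0, 0, 0, bpp, baddr);
        ops->enable_src_buf(fu);
        ops->set_controltrigger(fu);
}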
int dpu_map_irq(struct dpu_soc *dpu, int irq);
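/*
 * Illustrative sketch (not part of the original header): mapping one of the
 * hardware interrupt lines from the IRQ enum above to a Linux virq and
 * hooking a handler on it. The request_irq() flags and name, and the
 * assumption that a negative return from dpu_map_irq() means failure, are
 * illustration only.
 */
static int example_hook_extdst0_shdload(struct dpu_soc *dpu,
                                        irq_handler_t handler, void *data)
{
        int virq = dpu_map_irq(dpu, IRQ_EXTDST0_SHDLOAD);

        if (virq < 0)
                return virq;

        return request_irq(virq, handler, 0, "extdst0_shdload", data);
}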
/* Constant Frame Unit */
struct dpu_constframe;
void constframe_shden(struct dpu_constframe *cf, bool enable);
void constframe_framedimensions(struct dpu_constframe *cf, unsigned int w,
unsigned int h);
void constframe_framedimensions_copy_prim(struct dpu_constframe *cf);
void constframe_constantcolor(struct dpu_constframe *cf, unsigned int r,
unsigned int g, unsigned int b, unsigned int a);
void constframe_controltrigger(struct dpu_constframe *cf, bool trigger);
struct dpu_constframe *dpu_cf_get(struct dpu_soc *dpu, int id);
void dpu_cf_put(struct dpu_constframe *cf);
struct dpu_constframe *dpu_aux_cf_peek(struct dpu_constframe *cf);
/* Display Engine Configuration Unit */
struct dpu_disengcfg;
struct dpu_disengcfg *dpu_dec_get(struct dpu_soc *dpu, int id);
void dpu_dec_put(struct dpu_disengcfg *dec);
struct dpu_disengcfg *dpu_aux_dec_peek(struct dpu_disengcfg *dec);
/* External Destination Unit */
struct dpu_extdst;
void extdst_pixengcfg_shden(struct dpu_extdst *ed, bool enable);
void extdst_pixengcfg_powerdown(struct dpu_extdst *ed, bool powerdown);
void extdst_pixengcfg_sync_mode(struct dpu_extdst *ed, ed_sync_mode_t mode);
void extdst_pixengcfg_reset(struct dpu_extdst *ed, bool reset);
void extdst_pixengcfg_div(struct dpu_extdst *ed, u16 div);
void extdst_pixengcfg_syncmode_master(struct dpu_extdst *ed, bool enable);
int extdst_pixengcfg_src_sel(struct dpu_extdst *ed, extdst_src_sel_t src);
void extdst_pixengcfg_sel_shdldreq(struct dpu_extdst *ed);
void extdst_pixengcfg_shdldreq(struct dpu_extdst *ed, u32 req_mask);
void extdst_pixengcfg_sync_trigger(struct dpu_extdst *ed);
void extdst_pixengcfg_trigger_sequence_complete(struct dpu_extdst *ed);
bool extdst_pixengcfg_is_sync_busy(struct dpu_extdst *ed);
ed_pipeline_status_t extdst_pixengcfg_pipeline_status(struct dpu_extdst *ed);
void extdst_shden(struct dpu_extdst *ed, bool enable);
void extdst_kick_mode(struct dpu_extdst *ed, ed_kick_mode_t mode);
void extdst_perfcountmode(struct dpu_extdst *ed, bool enable);
void extdst_gamma_apply_enable(struct dpu_extdst *ed, bool enable);
void extdst_kick(struct dpu_extdst *ed);
void extdst_cnt_err_clear(struct dpu_extdst *ed);
bool extdst_cnt_err_status(struct dpu_extdst *ed);
u32 extdst_last_control_word(struct dpu_extdst *ed);
void extdst_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y);
void extdst_last_pixel_cnt(struct dpu_extdst *ed, u16 *x, u16 *y);
u32 extdst_perfresult(struct dpu_extdst *ed);
bool extdst_is_master(struct dpu_extdst *ed);
struct dpu_extdst *dpu_ed_get(struct dpu_soc *dpu, int id);
void dpu_ed_put(struct dpu_extdst *ed);
struct dpu_extdst *dpu_aux_ed_peek(struct dpu_extdst *ed);
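/*
 * Illustrative sketch (not part of the original header): acquiring an
 * ExtDst, pointing it at a pixel-engine source and kicking it once in
 * software mode. The chosen source, the call order and the ERR_PTR-style
 * error handling of dpu_ed_get() are assumptions.
 */
static int example_extdst_oneshot(struct dpu_soc *dpu, int id)
{
        struct dpu_extdst *ed = dpu_ed_get(dpu, id);
        int ret;

        if (IS_ERR(ed))
                return PTR_ERR(ed);

        extdst_pixengcfg_sync_mode(ed, SINGLE);
        ret = extdst_pixengcfg_src_sel(ed, ED_SRC_LAYERBLEND0);
        if (ret)
                goto out;

        extdst_kick_mode(ed, SOFTWARE);
        extdst_kick(ed);
        extdst_pixengcfg_sync_trigger(ed);
out:
        dpu_ed_put(ed);
        return ret;
}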
/* Fetch Decode Unit */
int fetchdecode_pixengcfg_dynamic_src_sel(struct dpu_fetchunit *fu,
fd_dynamic_src_sel_t src);
void fetchdecode_layeroffset(struct dpu_fetchunit *fd, unsigned int x,
unsigned int y);
void fetchdecode_clipoffset(struct dpu_fetchunit *fd, unsigned int x,
unsigned int y);
void fetchdecode_clipdimensions(struct dpu_fetchunit *fd, unsigned int w,
unsigned int h);
void fetchdecode_rgb_constantcolor(struct dpu_fetchunit *fd,
u8 r, u8 g, u8 b, u8 a);
void fetchdecode_yuv_constantcolor(struct dpu_fetchunit *fd,
u8 y, u8 u, u8 v);
int fetchdecode_fetchtype(struct dpu_fetchunit *fd, fetchtype_t *type);
u32 fetchdecode_get_vproc_mask(struct dpu_fetchunit *fd);
bool fetchdecode_need_fetcheco(struct dpu_fetchunit *fd, u32 fmt);
struct dpu_fetchunit *dpu_fd_get(struct dpu_soc *dpu, int id);
void dpu_fd_put(struct dpu_fetchunit *fu);
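/*
 * Illustrative sketch (not part of the original header): choosing the
 * auxiliary input of a FetchDecode based on whether the framebuffer format
 * needs the extra chroma/alpha fetch path. Pairing this unit with FetchECO0
 * is an assumption; the real code picks the companion unit dynamically.
 */
static int example_fetchdecode_select_aux(struct dpu_fetchunit *fd, u32 fmt)
{
        if (fetchdecode_need_fetcheco(fd, fmt))
                return fetchdecode_pixengcfg_dynamic_src_sel(fd,
                                                             FD_SRC_FETCHECO0);

        return fetchdecode_pixengcfg_dynamic_src_sel(fd, FD_SRC_DISABLE);
}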
/* Fetch ECO Unit */
void fetcheco_layeroffset(struct dpu_fetchunit *fu, unsigned int x,
unsigned int y);
void fetcheco_clipoffset(struct dpu_fetchunit *fu, unsigned int x,
unsigned int y);
void fetcheco_clipdimensions(struct dpu_fetchunit *fu, unsigned int w,
unsigned int h);
void fetcheco_frameresampling(struct dpu_fetchunit *fu, unsigned int x,
unsigned int y);
int fetcheco_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
dpu_block_id_t fetcheco_get_block_id(struct dpu_fetchunit *fu);
struct dpu_fetchunit *dpu_fe_get(struct dpu_soc *dpu, int id);
void dpu_fe_put(struct dpu_fetchunit *fu);
/* Fetch Layer Unit */
void fetchlayer_rgb_constantcolor(struct dpu_fetchunit *fu,
u8 r, u8 g, u8 b, u8 a);
void fetchlayer_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v);
int fetchlayer_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
struct dpu_fetchunit *dpu_fl_get(struct dpu_soc *dpu, int id);
void dpu_fl_put(struct dpu_fetchunit *fu);
/* Fetch Warp Unit */
void fetchwarp_rgb_constantcolor(struct dpu_fetchunit *fu,
u8 r, u8 g, u8 b, u8 a);
void fetchwarp_yuv_constantcolor(struct dpu_fetchunit *fu, u8 y, u8 u, u8 v);
int fetchwarp_fetchtype(struct dpu_fetchunit *fu, fetchtype_t *type);
struct dpu_fetchunit *dpu_fw_get(struct dpu_soc *dpu, int id);
void dpu_fw_put(struct dpu_fetchunit *fu);
/* Frame Generator Unit */
struct dpu_framegen;
void framegen_enable(struct dpu_framegen *fg);
void framegen_disable(struct dpu_framegen *fg);
void framegen_enable_pixel_link(struct dpu_framegen *fg);
void framegen_disable_pixel_link(struct dpu_framegen *fg);
void framegen_shdtokgen(struct dpu_framegen *fg);
void framegen_syncmode(struct dpu_framegen *fg, fgsyncmode_t mode);
void framegen_cfg_videomode(struct dpu_framegen *fg, struct drm_display_mode *m,
bool side_by_side, unsigned int encoder_type);
void framegen_pkickconfig(struct dpu_framegen *fg, bool enable);
void framegen_syncmode_fixup(struct dpu_framegen *fg, bool enable);
void framegen_displaymode(struct dpu_framegen *fg, fgdm_t mode);
void framegen_panic_displaymode(struct dpu_framegen *fg, fgdm_t mode);
void framegen_wait_done(struct dpu_framegen *fg, struct drm_display_mode *m);
void framegen_read_timestamp(struct dpu_framegen *fg,
u32 *frame_index, u32 *line_index);
void framegen_wait_for_frame_counter_moving(struct dpu_framegen *fg);
bool framegen_secondary_requests_to_read_empty_fifo(struct dpu_framegen *fg);
void framegen_secondary_clear_channel_status(struct dpu_framegen *fg);
bool framegen_secondary_is_syncup(struct dpu_framegen *fg);
void framegen_wait_for_secondary_syncup(struct dpu_framegen *fg);
void framegen_enable_clock(struct dpu_framegen *fg);
void framegen_disable_clock(struct dpu_framegen *fg);
bool framegen_is_master(struct dpu_framegen *fg);
bool framegen_is_slave(struct dpu_framegen *fg);
struct dpu_framegen *dpu_fg_get(struct dpu_soc *dpu, int id);
void dpu_fg_put(struct dpu_framegen *fg);
struct dpu_framegen *dpu_aux_fg_peek(struct dpu_framegen *fg);
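/*
 * Illustrative sketch (not part of the original header): a minimal FrameGen
 * bring-up for a single (non side-by-side) display stream. The display mode
 * choices and the exact call order are assumptions; the real CRTC enable
 * path may differ.
 */
static void example_framegen_start(struct dpu_framegen *fg,
                                   struct drm_display_mode *mode,
                                   unsigned int encoder_type)
{
        framegen_cfg_videomode(fg, mode, false, encoder_type);
        framegen_displaymode(fg, FGDM__SEC_ON_TOP);
        framegen_panic_displaymode(fg, FGDM__CONSTCOL);
        framegen_enable_clock(fg);
        framegen_enable_pixel_link(fg);
        framegen_enable(fg);
        framegen_wait_for_frame_counter_moving(fg);
}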
/* Horizontal Scaler Unit */
struct dpu_hscaler;
int hscaler_pixengcfg_dynamic_src_sel(struct dpu_hscaler *hs, hs_src_sel_t src);
void hscaler_pixengcfg_clken(struct dpu_hscaler *hs, pixengcfg_clken_t clken);
void hscaler_shden(struct dpu_hscaler *hs, bool enable);
void hscaler_setup1(struct dpu_hscaler *hs, unsigned int src, unsigned int dst);
void hscaler_setup2(struct dpu_hscaler *hs, u32 phase_offset);
void hscaler_output_size(struct dpu_hscaler *hs, u32 line_num);
void hscaler_filter_mode(struct dpu_hscaler *hs, scaler_filter_mode_t m);
void hscaler_scale_mode(struct dpu_hscaler *hs, scaler_scale_mode_t m);
void hscaler_mode(struct dpu_hscaler *hs, scaler_mode_t m);
bool hscaler_is_enabled(struct dpu_hscaler *hs);
dpu_block_id_t hscaler_get_block_id(struct dpu_hscaler *hs);
unsigned int hscaler_get_stream_id(struct dpu_hscaler *hs);
void hscaler_set_stream_id(struct dpu_hscaler *hs, unsigned int id);
struct dpu_hscaler *dpu_hs_get(struct dpu_soc *dpu, int id);
void dpu_hs_put(struct dpu_hscaler *hs);
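/*
 * Illustrative sketch (not part of the original header): programming a
 * horizontal scaler. The source selection, the meaning of the setup1()
 * arguments as source/destination widths and the output_size() argument are
 * assumptions for illustration only.
 */
static int example_hscaler_configure(struct dpu_hscaler *hs,
                                     unsigned int src_w, unsigned int dst_w)
{
        int ret = hscaler_pixengcfg_dynamic_src_sel(hs, HS_SRC_SEL__FETCHDECODE0);

        if (ret)
                return ret;

        hscaler_pixengcfg_clken(hs, CLKEN__AUTOMATIC);
        hscaler_shden(hs, true);
        hscaler_setup1(hs, src_w, dst_w);
        hscaler_setup2(hs, 0);
        hscaler_output_size(hs, dst_w);
        hscaler_filter_mode(hs, SCALER_LINEAR);
        hscaler_scale_mode(hs, src_w <= dst_w ? SCALER_UPSCALE : SCALER_DOWNSCALE);
        hscaler_mode(hs, SCALER_ACTIVE);

        return 0;
}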
/* Layer Blend Unit */
struct dpu_layerblend;
int layerblend_pixengcfg_dynamic_prim_sel(struct dpu_layerblend *lb,
lb_prim_sel_t prim);
void layerblend_pixengcfg_dynamic_sec_sel(struct dpu_layerblend *lb,
lb_sec_sel_t sec);
void layerblend_pixengcfg_clken(struct dpu_layerblend *lb,
pixengcfg_clken_t clken);
void layerblend_shden(struct dpu_layerblend *lb, bool enable);
void layerblend_shdtoksel(struct dpu_layerblend *lb, lb_shadow_sel_t sel);
void layerblend_shdldsel(struct dpu_layerblend *lb, lb_shadow_sel_t sel);
void layerblend_control(struct dpu_layerblend *lb, lb_mode_t mode);
void layerblend_blendcontrol(struct dpu_layerblend *lb, unsigned int zpos,
unsigned int pixel_blend_mode, u16 alpha);
void layerblend_position(struct dpu_layerblend *lb, int x, int y);
struct dpu_layerblend *dpu_lb_get(struct dpu_soc *dpu, int id);
void dpu_lb_put(struct dpu_layerblend *lb);
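/*
 * Illustrative sketch (not part of the original header): blending one
 * overlay on top of a background with a LayerBlend. The primary/secondary
 * sources, the zpos value and the use of DRM_MODE_BLEND_PREMULTI (from
 * drm_blend.h) are assumptions for illustration only.
 */
static int example_layerblend_overlay(struct dpu_layerblend *lb,
                                      int x, int y, u16 alpha)
{
        int ret = layerblend_pixengcfg_dynamic_prim_sel(lb,
                                                        LB_PRIM_SEL__CONSTFRAME0);

        if (ret)
                return ret;

        layerblend_pixengcfg_dynamic_sec_sel(lb, LB_SEC_SEL__FETCHLAYER0);
        layerblend_pixengcfg_clken(lb, CLKEN__AUTOMATIC);
        layerblend_shden(lb, true);
        layerblend_shdldsel(lb, BOTH);
        layerblend_control(lb, LB_BLEND);
        layerblend_blendcontrol(lb, 1, DRM_MODE_BLEND_PREMULTI, alpha);
        layerblend_position(lb, x, y);

        return 0;
}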
/* Store Unit */
struct dpu_store;
void store_pixengcfg_syncmode_fixup(struct dpu_store *st, bool enable);
struct dpu_store *dpu_st_get(struct dpu_soc *dpu, int id);
void dpu_st_put(struct dpu_store *st);
/* Timing Controller Unit */
struct dpu_tcon;
int tcon_set_fmt(struct dpu_tcon *tcon, u32 bus_format);
void tcon_set_operation_mode(struct dpu_tcon *tcon);
void tcon_cfg_videomode(struct dpu_tcon *tcon,
struct drm_display_mode *m, bool side_by_side);
bool tcon_is_master(struct dpu_tcon *tcon);
bool tcon_is_slave(struct dpu_tcon *tcon);
void tcon_configure_pc(struct dpu_tcon *tcon, unsigned int di,
unsigned int frame_width, u32 mode, u32 format);
void tcon_enable_pc(struct dpu_tcon *tcon);
void tcon_disable_pc(struct dpu_tcon *tcon);
struct dpu_tcon *dpu_tcon_get(struct dpu_soc *dpu, int id);
void dpu_tcon_put(struct dpu_tcon *tcon);
struct dpu_tcon *dpu_aux_tcon_peek(struct dpu_tcon *tcon);
/* Vertical Scaler Unit */
struct dpu_vscaler;
int vscaler_pixengcfg_dynamic_src_sel(struct dpu_vscaler *vs, vs_src_sel_t src);
void vscaler_pixengcfg_clken(struct dpu_vscaler *vs, pixengcfg_clken_t clken);
void vscaler_shden(struct dpu_vscaler *vs, bool enable);
void vscaler_setup1(struct dpu_vscaler *vs, u32 src, u32 dst, bool deinterlace);
void vscaler_setup2(struct dpu_vscaler *vs, bool deinterlace);
void vscaler_setup3(struct dpu_vscaler *vs, bool deinterlace);
void vscaler_setup4(struct dpu_vscaler *vs, u32 phase_offset);
void vscaler_setup5(struct dpu_vscaler *vs, u32 phase_offset);
void vscaler_output_size(struct dpu_vscaler *vs, u32 line_num);
void vscaler_field_mode(struct dpu_vscaler *vs, scaler_field_mode_t m);
void vscaler_filter_mode(struct dpu_vscaler *vs, scaler_filter_mode_t m);
void vscaler_scale_mode(struct dpu_vscaler *vs, scaler_scale_mode_t m);
void vscaler_mode(struct dpu_vscaler *vs, scaler_mode_t m);
bool vscaler_is_enabled(struct dpu_vscaler *vs);
dpu_block_id_t vscaler_get_block_id(struct dpu_vscaler *vs);
unsigned int vscaler_get_stream_id(struct dpu_vscaler *vs);
void vscaler_set_stream_id(struct dpu_vscaler *vs, unsigned int id);
struct dpu_vscaler *dpu_vs_get(struct dpu_soc *dpu, int id);
void dpu_vs_put(struct dpu_vscaler *vs);
struct dpu_fetchunit *fetchdecode_get_fetcheco(struct dpu_fetchunit *fu);
struct dpu_hscaler *fetchdecode_get_hscaler(struct dpu_fetchunit *fu);
struct dpu_vscaler *fetchdecode_get_vscaler(struct dpu_fetchunit *fu);
unsigned int dpu_get_syncmode_min_prate(struct dpu_soc *dpu);
unsigned int dpu_get_singlemode_max_width(struct dpu_soc *dpu);
unsigned int dpu_get_master_stream_id(struct dpu_soc *dpu);
bool dpu_vproc_has_fetcheco_cap(u32 cap_mask);
bool dpu_vproc_has_hscale_cap(u32 cap_mask);
bool dpu_vproc_has_vscale_cap(u32 cap_mask);
u32 dpu_vproc_get_fetcheco_cap(u32 cap_mask);
u32 dpu_vproc_get_hscale_cap(u32 cap_mask);
u32 dpu_vproc_get_vscale_cap(u32 cap_mask);
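/*
 * Illustrative sketch (not part of the original header): checking whether a
 * plane fed by a FetchDecode has both scalers available, based on the video
 * processing capability mask helpers above.
 */
static bool example_plane_can_scale(struct dpu_fetchunit *fd)
{
        u32 cap = fetchdecode_get_vproc_mask(fd);

        return dpu_vproc_has_hscale_cap(cap) && dpu_vproc_has_vscale_cap(cap);
}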
unsigned int fetchunit_burst_size_fixup_tkt343664(dma_addr_t baddr);
unsigned int
fetchunit_stride_fixup_tkt339017(unsigned int stride, unsigned int burst_size,
dma_addr_t baddr, bool nonzero_mod);
void fetchunit_get_dprc(struct dpu_fetchunit *fu, void *data);
void fetchunit_shden(struct dpu_fetchunit *fu, bool enable);
void fetchunit_baddr_autoupdate(struct dpu_fetchunit *fu, u8 layer_mask);
void fetchunit_shdldreq_sticky(struct dpu_fetchunit *fu, u8 layer_mask);
void fetchunit_set_burstlength(struct dpu_fetchunit *fu,
unsigned int x_offset, unsigned int mt_w,
int bpp, dma_addr_t baddr, bool use_prefetch);
void fetchunit_set_baseaddress(struct dpu_fetchunit *fu, unsigned int width,
unsigned int x_offset, unsigned int y_offset,
unsigned int mt_w, unsigned int mt_h,
int bpp, dma_addr_t baddr);
void fetchunit_set_src_bpp(struct dpu_fetchunit *fu, int bpp);
void fetchunit_set_src_stride(struct dpu_fetchunit *fu,
unsigned int width, unsigned int x_offset,
unsigned int mt_w, int bpp, unsigned int stride,
dma_addr_t baddr, bool use_prefetch);
void fetchunit_set_pixel_blend_mode(struct dpu_fetchunit *fu,
unsigned int pixel_blend_mode, u16 alpha,
u32 fb_format);
void fetchunit_enable_src_buf(struct dpu_fetchunit *fu);
void fetchunit_disable_src_buf(struct dpu_fetchunit *fu);
bool fetchunit_is_enabled(struct dpu_fetchunit *fu);
unsigned int fetchunit_get_stream_id(struct dpu_fetchunit *fu);
void fetchunit_set_stream_id(struct dpu_fetchunit *fu, unsigned int id);
void fetchunit_pin_off(struct dpu_fetchunit *fu);
void fetchunit_unpin_off(struct dpu_fetchunit *fu);
bool fetchunit_is_pinned_off(struct dpu_fetchunit *fu);
bool fetchunit_is_fetchdecode(struct dpu_fetchunit *fu);
bool fetchunit_is_fetcheco(struct dpu_fetchunit *fu);
bool fetchunit_is_fetchlayer(struct dpu_fetchunit *fu);
bool fetchunit_is_fetchwarp(struct dpu_fetchunit *fu);
/*
 * A plane source is bound to a single display stream to avoid
 * on-the-fly/hot plane resource migration between the two
 * display interfaces.
 */
#define DPU_PLANE_SRC_TO_DISP_STREAM0 BIT(0)
#define DPU_PLANE_SRC_TO_DISP_STREAM1 BIT(1)
#define DPU_PLANE_SRC_DISABLED 0
struct dpu_plane_res {
struct dpu_extdst *ed[2];
struct dpu_fetchunit *fd[2];
struct dpu_fetchunit *fe[2];
struct dpu_fetchunit *fl[1];
struct dpu_fetchunit *fw[1];
struct dpu_framegen *fg[2];
struct dpu_hscaler *hs[2];
struct dpu_layerblend *lb[4];
struct dpu_vscaler *vs[2];
};
/*
* Each DPU plane can be a primary plane or an overlay plane
* of one of the DPU's two CRTCs.
*/
#define DPU_PLANE_SRC_FL0_ID BIT(0)
#define DPU_PLANE_SRC_FW2_ID BIT(1)
#define DPU_PLANE_SRC_FD0_ID BIT(2)
#define DPU_PLANE_SRC_FD1_ID BIT(3)
struct dpu_plane_grp {
struct dpu_plane_res res;
unsigned int hw_plane_num;
unsigned int hw_plane_fetcheco_num;
unsigned int hw_plane_hscaler_num;
unsigned int hw_plane_vscaler_num;
unsigned int id;
bool has_vproc;
/* used when assigning plane source */
struct mutex mutex;
u32 src_mask;
u32 src_a_mask;
u32 src_use_vproc_mask;
};
static inline struct dpu_plane_grp *plane_res_to_grp(struct dpu_plane_res *res)
{
return container_of(res, struct dpu_plane_grp, res);
}
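/*
 * Illustrative sketch (not part of the original header): claiming one plane
 * source inside its owning group via the container_of() helper above. The
 * interpretation of src_a_mask as an "available sources" bitmask is an
 * assumption for illustration only.
 */
static bool example_claim_fetchlayer0(struct dpu_plane_res *res)
{
        struct dpu_plane_grp *grp = plane_res_to_grp(res);
        bool claimed = false;

        mutex_lock(&grp->mutex);
        if (grp->src_a_mask & DPU_PLANE_SRC_FL0_ID) {
                grp->src_a_mask &= ~DPU_PLANE_SRC_FL0_ID;
                claimed = true;
        }
        mutex_unlock(&grp->mutex);

        return claimed;
}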
struct dpu_client_platformdata {
const unsigned int stream_id;
unsigned int di_grp_id;
struct dpu_plane_grp *plane_grp;
/* Store9 could be shared between the display engine and the blit engine */
struct dpu_store *st9;
struct device_node *of_node;
};
#endif /* __DRM_DPU_H__ */