Merge remote-tracking branch 'origin/display/dcss' into display/next

* origin/display/dcss: (25 commits)
  drm/imx/dcss: Release DTRC IRQs in case of failure
  drm/imx/dcss: fix crash in DTRC exit routine
  dt-bindings: display: imx8mq-dcss: add bindings for DTRC interrupts
  drm/imx/dcss: add support for tiled formats on overlay planes
  drm/imx/dcss: change HDR10 pipes config handling
  ...
Dong Aisheng 2019-12-02 18:01:00 +08:00
commit 11ae29ac2e
26 changed files with 8124 additions and 1 deletions

@@ -0,0 +1,93 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2019 NXP
%YAML 1.2
---
$id: "http://devicetree.org/schemas/display/imx/nxp,imx8mq-dcss.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: iMX8MQ Display Controller Subsystem (DCSS)

maintainers:
  - Laurentiu Palcu <laurentiu.palcu@nxp.com>

description:
  The DCSS (display controller subsystem) is used to source up to three
  display buffers, compose them, and drive a display using HDMI 2.0a (with
  HDCP 2.2) or MIPI-DSI. The DCSS is intended to support up to 4kp60
  displays. HDR10 image processing capabilities are included to provide a
  solution capable of driving next generation high dynamic range displays.

properties:
  compatible:
    const: nxp,imx8mq-dcss

  reg:
    maxItems: 2

  interrupts:
    maxItems: 5
    items:
      - description: Context loader completion and error interrupt
      - description: DTG interrupt used to signal context loader trigger time
      - description: DTG interrupt for Vblank
      - description: DTRC interrupt for decompression channel 1
      - description: DTRC interrupt for decompression channel 2

  interrupt-names:
    maxItems: 5
    items:
      - const: ctx_ld
      - const: ctxld_kick
      - const: vblank
      - const: dtrc_ch1
      - const: dtrc_ch2

  clocks:
    maxItems: 7
    items:
      - description: Display APB clock for all peripheral PIO access interfaces
      - description: Display AXI clock needed by DPR, Scaler, RTRAM_CTRL
      - description: RTRAM clock
      - description: Pixel clock, can be driven either by the HDMI PHY clock or MIPI
      - description: DTRC clock, needed by the video decompressor
      - description: PLL source clock, usually VIDEO2_PLL, used when the output is HDMI
      - description: PLL PHY reference clock, used when the output is HDMI

  clock-names:
    items:
      - const: apb
      - const: axi
      - const: rtrm
      - const: pix
      - const: dtrc
      - const: pll_src
      - const: pll_phy_ref

  port@0:
    type: object
    description: A port node pointing to an hdmi_in or mipi_in port node.

examples:
  - |
    dcss: display-controller@32e00000 {
        #address-cells = <1>;
        #size-cells = <0>;
        compatible = "nxp,imx8mq-dcss";
        reg = <0x32e00000 0x2d000>, <0x32e2f000 0x1000>;
        interrupts = <6>, <8>, <9>, <16>, <17>;
        interrupt-names = "ctx_ld", "ctxld_kick", "vblank", "dtrc_ch1", "dtrc_ch2";
        interrupt-parent = <&irqsteer>;
        clocks = <&clk 248>, <&clk 247>, <&clk 249>,
                 <&clk 254>, <&clk 122>, <&clk 266>, <&clk 267>;
        clock-names = "apb", "axi", "rtrm", "pix", "dtrc",
                      "pll_src", "pll_phy_ref";
        assigned-clocks = <&clk 107>, <&clk 109>, <&clk 266>;
        assigned-clock-parents = <&clk 78>, <&clk 78>, <&clk 3>;
        assigned-clock-rates = <800000000>,
                               <400000000>;

        port@0 {
            dcss_out: endpoint {
                remote-endpoint = <&hdmi_in>;
            };
        };
    };

@@ -98,7 +98,7 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STM) += stm/
obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
obj-y += imx/
obj-$(CONFIG_DRM_INGENIC) += ingenic/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_MESON) += meson/

@@ -39,3 +39,5 @@ config DRM_IMX_HDMI
depends on DRM_IMX
help
Choose this if you want to use HDMI on i.MX6.
source "drivers/gpu/drm/imx/dcss/Kconfig"

@@ -9,3 +9,4 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/

@@ -0,0 +1,7 @@
config DRM_IMX_DCSS
	tristate "i.MX8MQ DCSS"
	select RESET_CONTROLLER
	select IMX_IRQSTEER
	help
	  Choose this if you have an NXP i.MX8MQ based system and want to use the
	  Display Controller Subsystem. This option enables DCSS support.

@@ -0,0 +1,7 @@
imx-dcss-objs := dcss-drv.o dcss-dev.o dcss-blkctl.o dcss-ctxld.o dcss-dtg.o \
dcss-ss.o dcss-dpr.o dcss-scaler.o dcss-kms.o dcss-crtc.o \
dcss-plane.o dcss-dec400d.o dcss-hdr10.o dcss-wrscl.o \
dcss-rdsrc.o dcss-dtrc.o
obj-$(CONFIG_DRM_IMX_DCSS) += imx-dcss.o

@@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/of.h>
#include "dcss-dev.h"
#define DCSS_BLKCTL_RESET_CTRL 0x00
#define B_CLK_RESETN BIT(0)
#define APB_CLK_RESETN BIT(1)
#define P_CLK_RESETN BIT(2)
#define RTR_CLK_RESETN BIT(3)
#define DCSS_BLKCTL_CONTROL0 0x10
#define HDMI_MIPI_CLK_SEL BIT(0)
#define DISPMIX_REFCLK_SEL_POS 4
#define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4)
#define DISPMIX_PIXCLK_SEL BIT(8)
#define HDMI_SRC_SECURE_EN BIT(16)
struct dcss_blkctl {
struct device *dev;
void __iomem *base_reg;
bool hdmi_output;
};
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl)
{
if (blkctl->hdmi_output)
dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
else
dcss_writel(DISPMIX_PIXCLK_SEL,
blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN,
blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL);
}
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
{
struct dcss_blkctl *blkctl;
blkctl = devm_kzalloc(dcss->dev, sizeof(*blkctl), GFP_KERNEL);
if (!blkctl)
return -ENOMEM;
blkctl->base_reg = devm_ioremap(dcss->dev, blkctl_base, SZ_4K);
if (!blkctl->base_reg) {
dev_err(dcss->dev, "unable to remap BLK CTRL base\n");
devm_kfree(dcss->dev, blkctl);
return -ENOMEM;
}
dcss->blkctl = blkctl;
blkctl->dev = dcss->dev;
blkctl->hdmi_output = dcss->hdmi_output;
dcss_blkctl_cfg(blkctl);
return 0;
}
void dcss_blkctl_exit(struct dcss_blkctl *blkctl)
{
dcss_clr(P_CLK_RESETN | RTR_CLK_RESETN,
blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL);
if (blkctl->base_reg)
devm_iounmap(blkctl->dev, blkctl->base_reg);
devm_kfree(blkctl->dev, blkctl);
}
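
A note on the register helpers used above (a minimal sketch, not part of the commit): dcss_set() and dcss_clr() are macros defined in dcss-dev.h later in this diff. They write to a register's set/clear aliases, so individual reset bits can be flipped without a read-modify-write. The function names below are illustrative only.

static inline void example_dcss_set(u32 mask, void __iomem *reg)
{
	writel(mask, reg + 0x04);	/* SET alias: bits in mask are set to 1 */
}

static inline void example_dcss_clr(u32 mask, void __iomem *reg)
{
	writel(mask, reg + 0x08);	/* CLR alias: bits in mask are cleared */
}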

@@ -0,0 +1,256 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <linux/pm_runtime.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
static int dcss_enable_vblank(struct drm_crtc *crtc)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = crtc->dev->dev_private;
if (dcss_crtc->irq_enabled)
return 0;
dcss_crtc->irq_enabled = true;
dcss_dtg_vblank_irq_enable(dcss->dtg, true);
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
enable_irq(dcss_crtc->irq);
return 0;
}
static void dcss_disable_vblank(struct drm_crtc *crtc)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
disable_irq_nosync(dcss_crtc->irq);
dcss_dtg_vblank_irq_enable(dcss->dtg, false);
if (!dcss_dtrc_is_running(dcss->dtrc))
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false);
dcss_crtc->irq_enabled = false;
}
static const struct drm_crtc_funcs dcss_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = dcss_enable_vblank,
.disable_vblank = dcss_disable_vblank,
};
static void dcss_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
drm_crtc_vblank_on(crtc);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc));
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
}
static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
if (dcss_dtg_is_enabled(dcss->dtg))
dcss_ctxld_enable(dcss->ctxld);
}
static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct videomode vm;
drm_display_mode_to_videomode(mode, &vm);
pm_runtime_get_sync(dcss->dev);
dcss_enable_vblank(crtc);
vm.pixelclock = mode->crtc_clock * 1000;
dcss_dtg_sync_set(dcss->dtg, &vm);
dcss_ss_subsam_set(dcss->ss, dcss_crtc->output_is_yuv);
dcss_ss_sync_set(dcss->ss, &vm, mode->flags & DRM_MODE_FLAG_PHSYNC,
mode->flags & DRM_MODE_FLAG_PVSYNC);
dcss_dtg_css_set(dcss->dtg, dcss_crtc->output_is_yuv);
dcss_ss_enable(dcss->ss);
dcss_dtg_enable(dcss->dtg, true, NULL);
dcss_ctxld_enable(dcss->ctxld);
reinit_completion(&dcss_crtc->en_completion);
wait_for_completion_timeout(&dcss_crtc->en_completion,
msecs_to_jiffies(500));
}
static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
dcss_ss_disable(dcss->ss);
dcss_dtg_enable(dcss->dtg, false, &dcss_crtc->dis_completion);
dcss_ctxld_enable(dcss->ctxld);
reinit_completion(&dcss_crtc->dis_completion);
wait_for_completion_timeout(&dcss_crtc->dis_completion,
msecs_to_jiffies(100));
drm_crtc_vblank_off(crtc);
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false);
pm_runtime_put_sync(dcss->dev);
}
static const struct drm_crtc_helper_funcs dcss_helper_funcs = {
.atomic_begin = dcss_crtc_atomic_begin,
.atomic_flush = dcss_crtc_atomic_flush,
.atomic_enable = dcss_crtc_atomic_enable,
.atomic_disable = dcss_crtc_atomic_disable,
};
static irqreturn_t dcss_crtc_irq_handler(int irq, void *dev_id)
{
struct dcss_crtc *dcss_crtc = dev_id;
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
if (!dcss_dtg_vblank_irq_valid(dcss->dtg))
return IRQ_HANDLED;
complete(&dcss_crtc->en_completion);
if (dcss_ctxld_is_flushed(dcss->ctxld))
drm_crtc_handle_vblank(&dcss_crtc->base);
dcss_dtg_vblank_irq_clear(dcss->dtg);
return IRQ_HANDLED;
}
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm)
{
struct dcss_dev *dcss = drm->dev_private;
struct platform_device *pdev = to_platform_device(dcss->dev);
int ret;
crtc->plane[0] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
DRM_PLANE_TYPE_PRIMARY, 2);
if (IS_ERR(crtc->plane[0]))
return PTR_ERR(crtc->plane[0]);
crtc->base.port = dcss->of_port;
drm_crtc_helper_add(&crtc->base, &dcss_helper_funcs);
ret = drm_crtc_init_with_planes(drm, &crtc->base, &crtc->plane[0]->base,
NULL, &dcss_crtc_funcs, NULL);
if (ret) {
dev_err(dcss->dev, "failed to init crtc\n");
return ret;
}
crtc->plane[1] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
DRM_PLANE_TYPE_OVERLAY, 1);
if (IS_ERR(crtc->plane[1]))
crtc->plane[1] = NULL;
crtc->plane[2] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
DRM_PLANE_TYPE_OVERLAY, 0);
if (IS_ERR(crtc->plane[2]))
crtc->plane[2] = NULL;
drm_plane_create_alpha_property(&crtc->plane[0]->base);
crtc->irq = platform_get_irq_byname(pdev, "vblank");
if (crtc->irq < 0) {
dev_err(dcss->dev, "unable to get vblank interrupt\n");
return crtc->irq;
}
init_completion(&crtc->en_completion);
init_completion(&crtc->dis_completion);
ret = devm_request_irq(dcss->dev, crtc->irq, dcss_crtc_irq_handler,
IRQF_TRIGGER_RISING, "dcss_drm", crtc);
if (ret) {
dev_err(dcss->dev, "irq request failed with %d.\n", ret);
return ret;
}
disable_irq(crtc->irq);
return 0;
}
void dcss_crtc_attach_color_mgmt_properties(struct dcss_crtc *crtc)
{
int i;
/* create color management properties only for video planes */
for (i = 1; i < 3; i++) {
		if (!crtc->plane[i])
			continue;

		if (crtc->plane[i]->type == DRM_PLANE_TYPE_PRIMARY)
			return;
drm_plane_create_color_properties(&crtc->plane[i]->base,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709) |
BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_FULL_RANGE);
}
}
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm)
{
struct dcss_dev *dcss = drm->dev_private;
devm_free_irq(dcss->dev, crtc->irq, crtc);
}

@@ -0,0 +1,452 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include "dcss-dev.h"
#define DCSS_CTXLD_DEVNAME "dcss_ctxld"
#define DCSS_CTXLD_CONTROL_STATUS 0x0
#define CTXLD_ENABLE BIT(0)
#define ARB_SEL BIT(1)
#define RD_ERR_EN BIT(2)
#define DB_COMP_EN BIT(3)
#define SB_HP_COMP_EN BIT(4)
#define SB_LP_COMP_EN BIT(5)
#define DB_PEND_SB_REC_EN BIT(6)
#define SB_PEND_DISP_ACTIVE_EN BIT(7)
#define AHB_ERR_EN BIT(8)
#define RD_ERR BIT(16)
#define DB_COMP BIT(17)
#define SB_HP_COMP BIT(18)
#define SB_LP_COMP BIT(19)
#define DB_PEND_SB_REC BIT(20)
#define SB_PEND_DISP_ACTIVE BIT(21)
#define AHB_ERR BIT(22)
#define DCSS_CTXLD_DB_BASE_ADDR 0x10
#define DCSS_CTXLD_DB_COUNT 0x14
#define DCSS_CTXLD_SB_BASE_ADDR 0x18
#define DCSS_CTXLD_SB_COUNT 0x1C
#define SB_HP_COUNT_POS 0
#define SB_HP_COUNT_MASK 0xffff
#define SB_LP_COUNT_POS 16
#define SB_LP_COUNT_MASK 0xffff0000
#define DCSS_AHB_ERR_ADDR 0x20
#define CTXLD_IRQ_NAME "ctx_ld"
#define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP)
#define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR)
/* The following sizes are in context loader entries, 8 bytes each. */
#define CTXLD_DB_CTX_ENTRIES 1024 /* max 65536 */
#define CTXLD_SB_LP_CTX_ENTRIES 10240 /* max 65536 */
#define CTXLD_SB_HP_CTX_ENTRIES 20000 /* max 65536 */
#define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \
CTXLD_SB_HP_CTX_ENTRIES)
/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
static u16 dcss_ctxld_ctx_size[3] = {
CTXLD_DB_CTX_ENTRIES,
CTXLD_SB_HP_CTX_ENTRIES,
CTXLD_SB_LP_CTX_ENTRIES
};
/* this represents an entry in the context loader map */
struct dcss_ctxld_item {
u32 val;
u32 ofs;
};
#define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item)
struct dcss_ctxld {
struct device *dev;
void __iomem *ctxld_reg;
int irq;
bool irq_en;
struct dcss_ctxld_item *db[2];
struct dcss_ctxld_item *sb_hp[2];
struct dcss_ctxld_item *sb_lp[2];
dma_addr_t db_paddr[2];
dma_addr_t sb_paddr[2];
u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
u8 current_ctx;
bool in_use;
bool armed;
	spinlock_t lock; /* protects concurrent access to private data */
void (*dtg_disable_cb)(void *data);
void *dtg_disable_data;
};
static int __dcss_ctxld_enable(struct dcss_ctxld *ctxld);
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
struct dcss_ctxld *ctxld = data;
u32 irq_status;
irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
if (irq_status & CTXLD_IRQ_COMPLETION &&
!(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
ctxld->in_use = false;
if (ctxld->dtg_disable_cb) {
ctxld->dtg_disable_cb(ctxld->dtg_disable_data);
ctxld->dtg_disable_cb = NULL;
ctxld->dtg_disable_data = NULL;
}
} else if (irq_status & CTXLD_IRQ_ERROR) {
/*
* Except for throwing an error message and clearing the status
* register, there's not much we can do here.
*/
dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
irq_status);
dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
}
dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
return IRQ_HANDLED;
}
static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
struct platform_device *pdev)
{
int ret;
ctxld->irq = platform_get_irq_byname(pdev, CTXLD_IRQ_NAME);
if (ctxld->irq < 0) {
dev_err(ctxld->dev, "ctxld: can't get irq number\n");
return ctxld->irq;
}
ret = devm_request_irq(ctxld->dev, ctxld->irq,
dcss_ctxld_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
DCSS_CTXLD_DEVNAME, ctxld);
if (ret) {
dev_err(ctxld->dev, "ctxld: irq request failed.\n");
return ret;
}
ctxld->irq_en = true;
return 0;
}
void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
{
dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
}
static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
{
struct dcss_ctxld_item *ctx;
int i;
for (i = 0; i < 2; i++) {
if (ctxld->db[i]) {
dmam_free_coherent(ctxld->dev,
CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
ctxld->db[i], ctxld->db_paddr[i]);
ctxld->db[i] = NULL;
ctxld->db_paddr[i] = 0;
}
if (ctxld->sb_hp[i]) {
dmam_free_coherent(ctxld->dev,
CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
ctxld->sb_hp[i], ctxld->sb_paddr[i]);
ctxld->sb_hp[i] = NULL;
ctxld->sb_paddr[i] = 0;
}
}
}
static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
{
struct dcss_ctxld_item *ctx;
int i;
dma_addr_t dma_handle;
for (i = 0; i < 2; i++) {
ctx = dmam_alloc_coherent(ctxld->dev,
CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
&dma_handle, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctxld->db[i] = ctx;
ctxld->db_paddr[i] = dma_handle;
ctx = dmam_alloc_coherent(ctxld->dev,
CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
&dma_handle, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctxld->sb_hp[i] = ctx;
ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
ctxld->sb_paddr[i] = dma_handle;
}
return 0;
}
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
{
struct dcss_ctxld *ctxld;
int ret;
ctxld = devm_kzalloc(dcss->dev, sizeof(struct dcss_ctxld),
GFP_KERNEL);
if (!ctxld)
return -ENOMEM;
dcss->ctxld = ctxld;
ctxld->dev = dcss->dev;
spin_lock_init(&ctxld->lock);
ret = dcss_ctxld_alloc_ctx(ctxld);
if (ret) {
dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
goto err;
}
ctxld->ctxld_reg = devm_ioremap(dcss->dev, ctxld_base, SZ_4K);
if (!ctxld->ctxld_reg) {
dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
ret = -ENOMEM;
goto err;
}
ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
if (ret)
goto err_irq;
dcss_ctxld_hw_cfg(ctxld);
return 0;
err_irq:
devm_iounmap(ctxld->dev, ctxld->ctxld_reg);
err:
dcss_ctxld_free_ctx(ctxld);
devm_kfree(ctxld->dev, ctxld);
return ret;
}
void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
devm_free_irq(ctxld->dev, ctxld->irq, ctxld);
if (ctxld->ctxld_reg)
devm_iounmap(ctxld->dev, ctxld->ctxld_reg);
dcss_ctxld_free_ctx(ctxld);
devm_kfree(ctxld->dev, ctxld);
}
static int __dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
int curr_ctx = ctxld->current_ctx;
u32 db_base, sb_base, sb_count;
u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
dcss_dpr_write_sysctrl(dcss->dpr);
dcss_scaler_write_sclctrl(dcss->scaler);
if (dcss_dtrc_is_running(dcss->dtrc)) {
dcss_dtrc_switch_banks(dcss->dtrc);
ctxld->armed = true;
}
sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];
/* make sure SB_LP context area comes after SB_HP */
if (sb_lp_cnt &&
ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
struct dcss_ctxld_item *sb_lp_adjusted;
sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;
memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
sb_lp_cnt * CTX_ITEM_SIZE);
}
db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;
dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);
if (sb_hp_cnt)
sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
else
sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;
sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;
dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);
/* enable the context loader */
dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
ctxld->in_use = true;
/*
* Toggle the current context to the alternate one so that any updates
* in the modules' settings take place there.
*/
ctxld->current_ctx ^= 1;
ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;
return 0;
}
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
unsigned long flags;
spin_lock_irqsave(&ctxld->lock, flags);
ctxld->armed = true;
spin_unlock_irqrestore(&ctxld->lock, flags);
return 0;
}
void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
{
unsigned long flags;
spin_lock_irqsave(&ctxld->lock, flags);
if (ctxld->armed && !ctxld->in_use) {
ctxld->armed = false;
__dcss_ctxld_enable(ctxld);
}
spin_unlock_irqrestore(&ctxld->lock, flags);
}
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
u32 reg_ofs)
{
int curr_ctx = ctxld->current_ctx;
struct dcss_ctxld_item *ctx[] = {
[CTX_DB] = ctxld->db[curr_ctx],
[CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
[CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
};
int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
WARN_ON(1);
return;
}
ctx[ctx_id][item_idx].val = val;
ctx[ctx_id][item_idx].ofs = reg_ofs;
ctxld->ctx_size[curr_ctx][ctx_id] += 1;
}
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
u32 val, u32 reg_ofs)
{
unsigned long flags;
spin_lock_irqsave(&ctxld->lock, flags);
dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
spin_unlock_irqrestore(&ctxld->lock, flags);
}
bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
{
return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
}
int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
{
dcss_ctxld_hw_cfg(ctxld);
if (!ctxld->irq_en) {
enable_irq(ctxld->irq);
ctxld->irq_en = true;
}
return 0;
}
int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
{
int ret = 0;
int wait_time_ms = 0;
unsigned long flags;
dcss_ctxld_kick(ctxld);
while (ctxld->in_use && wait_time_ms < 500) {
msleep(20);
wait_time_ms += 20;
}
	if (ctxld->in_use)
		return -ETIMEDOUT;
spin_lock_irqsave(&ctxld->lock, flags);
if (ctxld->irq_en) {
disable_irq_nosync(ctxld->irq);
ctxld->irq_en = false;
}
/* reset context region and sizes */
ctxld->current_ctx = 0;
ctxld->ctx_size[0][CTX_DB] = 0;
ctxld->ctx_size[0][CTX_SB_HP] = 0;
ctxld->ctx_size[0][CTX_SB_LP] = 0;
spin_unlock_irqrestore(&ctxld->lock, flags);
return ret;
}
void dcss_ctxld_register_dtg_disable_cb(struct dcss_ctxld *ctxld,
void (*cb)(void *),
void *data)
{
ctxld->dtg_disable_cb = cb;
ctxld->dtg_disable_data = data;
}
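
To make the flow above easier to follow, here is a minimal usage sketch (illustrative only, not part of the commit): submodules queue register writes with dcss_ctxld_write(), the atomic-commit path arms a flush with dcss_ctxld_enable(), and the DTG "ctxld_kick" interrupt calls dcss_ctxld_kick() so the queued context is loaded into hardware near blanking time. The function names and the register offset below are assumptions made for illustration.

static void example_queue_plane_update(struct dcss_ctxld *ctxld, u32 base_addr)
{
	/* Queue a write into the currently inactive context buffer. */
	dcss_ctxld_write(ctxld, CTX_SB_HP, base_addr,
			 0x180c0 /* hypothetical DPR register offset */);

	/* Arm the context loader; nothing reaches the hardware yet. */
	dcss_ctxld_enable(ctxld);
}

/* Typically called from the DTG "ctxld_kick" interrupt handler. */
static void example_ctxld_kick(struct dcss_ctxld *ctxld)
{
	/*
	 * If armed and idle, __dcss_ctxld_enable() programs the DB/SB base
	 * addresses and counts, sets CTXLD_ENABLE and swaps current_ctx.
	 */
	dcss_ctxld_kick(ctxld);
}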

@@ -0,0 +1,270 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <drm/drm_fourcc.h>
#include "dcss-dev.h"
/* DEC400D registers offsets */
#define DEC400D_READCONFIG_BASE 0x800
#define DEC400D_READCONFIG(i) (DEC400D_READCONFIG_BASE + ((i) << 2))
#define COMPRESSION_ENABLE_BIT BIT(0)
#define COMPRESSION_FORMAT_POS 3
#define COMPRESSION_ALIGN_MODE_POS 16
#define TILE_ALIGN_MODE_POS 22
#define TILE_MODE_POS 25
#define DEC400D_READBUFFERBASE0 0x900
#define DEC400D_READCACHEBASE0 0x980
#define DEC400D_CONTROL 0xB00
#define DEC400D_CLEAR 0xB80
#define DISABLE_COMPRESSION_BIT BIT(1)
#define SHADOW_TRIGGER_BIT BIT(29)
#define DEC400_CFMT_ARGB8 0x0
#define DEC400_CFMT_XRGB8 0x1
#define DEC400_CFMT_AYUV 0x2
#define DEC400_CFMT_UYVY 0x3
#define DEC400_CFMT_YUY2 0x4
#define DEC400_CFMT_YUV_ONLY 0x5
#define DEC400_CFMT_UV_MIX 0x6
#define DEC400_CFMT_ARGB4 0x7
#define DEC400_CFMT_XRGB4 0x8
#define DEC400_CFMT_A1R5G5B5 0x9
#define DEC400_CFMT_X1R5G5B5 0xA
#define DEC400_CFMT_R5G6B5 0xB
#define DEC400_CFMT_Z24S8 0xC
#define DEC400_CFMT_Z24 0xD
#define DEC400_CFMT_Z16 0xE
#define DEC400_CFMT_A2R10G10B10 0xF
#define DEC400_CFMT_BAYER 0x10
#define DEC400_CFMT_SIGNED_BAYER 0x11
struct dcss_dec400d {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
bool bypass; /* bypass or decompress */
};
static void dcss_dec400d_write(struct dcss_dec400d *dec400d,
u32 value,
u32 offset)
{
dcss_ctxld_write(dec400d->ctxld, dec400d->ctx_id,
value, dec400d->base_ofs + offset);
}
int dcss_dec400d_init(struct dcss_dev *dcss, unsigned long dec400d_base)
{
struct dcss_dec400d *dec400d;
int ret;
dec400d = devm_kzalloc(dcss->dev, sizeof(*dec400d), GFP_KERNEL);
if (!dec400d)
return -ENOMEM;
dcss->dec400d = dec400d;
dec400d->dev = dcss->dev;
dec400d->ctxld = dcss->ctxld;
dec400d->base_reg = devm_ioremap(dcss->dev, dec400d_base, SZ_4K);
if (!dec400d->base_reg) {
dev_err(dcss->dev, "dec400d: unable to remap dec400d base\n");
ret = -ENOMEM;
goto free_mem;
}
dec400d->base_ofs = dec400d_base;
dec400d->ctx_id = CTX_SB_HP;
return 0;
free_mem:
devm_kfree(dcss->dev, dcss->dec400d);
return ret;
}
void dcss_dec400d_exit(struct dcss_dec400d *dec400d)
{
if (dec400d->base_reg)
devm_iounmap(dec400d->dev, dec400d->base_reg);
devm_kfree(dec400d->dev, dec400d);
}
void dcss_dec400d_read_config(struct dcss_dec400d *dec400d,
u32 read_id,
bool compress_en,
u32 compress_format)
{
u32 cformat = 0;
u32 read_config = 0x0;
	/* TODO: only 'read_id' 0 is supported for now */
if (read_id) {
WARN_ON(1);
return;
}
if (!compress_en)
goto config;
switch (compress_format) {
case _VIV_CFMT_ARGB8:
cformat = DEC400_CFMT_ARGB8;
break;
case _VIV_CFMT_XRGB8:
cformat = DEC400_CFMT_XRGB8;
break;
case _VIV_CFMT_AYUV:
cformat = DEC400_CFMT_AYUV;
break;
case _VIV_CFMT_UYVY:
cformat = DEC400_CFMT_UYVY;
break;
case _VIV_CFMT_YUY2:
cformat = DEC400_CFMT_YUY2;
break;
case _VIV_CFMT_YUV_ONLY:
cformat = DEC400_CFMT_YUV_ONLY;
break;
case _VIV_CFMT_UV_MIX:
cformat = DEC400_CFMT_UV_MIX;
break;
case _VIV_CFMT_ARGB4:
cformat = DEC400_CFMT_ARGB4;
break;
case _VIV_CFMT_XRGB4:
cformat = DEC400_CFMT_XRGB4;
break;
case _VIV_CFMT_A1R5G5B5:
cformat = DEC400_CFMT_A1R5G5B5;
break;
case _VIV_CFMT_X1R5G5B5:
cformat = DEC400_CFMT_X1R5G5B5;
break;
case _VIV_CFMT_R5G6B5:
cformat = DEC400_CFMT_R5G6B5;
break;
case _VIV_CFMT_Z24S8:
cformat = DEC400_CFMT_Z24S8;
break;
case _VIV_CFMT_Z24:
cformat = DEC400_CFMT_Z24;
break;
case _VIV_CFMT_Z16:
cformat = DEC400_CFMT_Z16;
break;
case _VIV_CFMT_A2R10G10B10:
cformat = DEC400_CFMT_A2R10G10B10;
break;
case _VIV_CFMT_BAYER:
cformat = DEC400_CFMT_BAYER;
break;
case _VIV_CFMT_SIGNED_BAYER:
cformat = DEC400_CFMT_SIGNED_BAYER;
break;
default:
		/* TODO: not supported yet */
WARN_ON(1);
return;
}
/* Dec compress format */
read_config |= cformat << COMPRESSION_FORMAT_POS;
/* ALIGN32_BYTE */
read_config |= 0x2 << COMPRESSION_ALIGN_MODE_POS;
/* TILE1_ALIGN */
read_config |= 0x0 << TILE_ALIGN_MODE_POS;
/* TILE8x4 */
read_config |= 0x3 << TILE_MODE_POS;
/* Compression Enable */
read_config |= COMPRESSION_ENABLE_BIT;
config:
dcss_dec400d_write(dec400d, read_config, DEC400D_READCONFIG(read_id));
}
void dcss_dec400d_bypass(struct dcss_dec400d *dec400d)
{
u32 control;
dcss_dec400d_read_config(dec400d, 0, false, 0);
control = dcss_readl(dec400d->base_reg + DEC400D_CONTROL);
dev_dbg(dec400d->dev, "%s: dec400d control = %#x\n", __func__, control);
control |= DISABLE_COMPRESSION_BIT;
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
/* Trigger shadow registers */
control |= SHADOW_TRIGGER_BIT;
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
dec400d->bypass = true;
}
void dcss_dec400d_shadow_trig(struct dcss_dec400d *dec400d)
{
u32 control;
/* do nothing */
if (dec400d->bypass)
return;
control = dcss_readl(dec400d->base_reg + DEC400D_CONTROL);
/* Trigger shadow registers */
control |= SHADOW_TRIGGER_BIT;
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
}
void dcss_dec400d_addr_set(struct dcss_dec400d *dec400d, u32 baddr, u32 caddr)
{
/* set frame buffer base addr */
dcss_dec400d_write(dec400d, baddr, DEC400D_READBUFFERBASE0);
/* set tile status cache addr */
dcss_dec400d_write(dec400d, caddr, DEC400D_READCACHEBASE0);
dec400d->bypass = false;
}
void dcss_dec400d_fast_clear_config(struct dcss_dec400d *dec400d,
u32 fc_value,
bool enable)
{
dcss_dec400d_write(dec400d, fc_value, DEC400D_CLEAR);
}
void dcss_dec400d_enable(struct dcss_dec400d *dec400d)
{
u32 control;
if (dec400d->bypass)
return;
control = dcss_readl(dec400d->base_reg + DEC400D_CONTROL);
/* enable compression */
control &= ~(DISABLE_COMPRESSION_BIT);
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
/* Trigger shadow registers */
control |= SHADOW_TRIGGER_BIT;
dcss_dec400d_write(dec400d, control, DEC400D_CONTROL);
}
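
For context, a sketch of how a plane update might drive this block (an assumption for illustration, not part of the commit): the compression parameters would come from the _VIV_VIDMEM_METADATA structure declared in dcss-dev.h, attached to the buffer by the GPU stack. The function name and the way the metadata and addresses are obtained here are hypothetical.

static void example_dec400d_setup(struct dcss_dec400d *dec400d,
				  const _VIV_VIDMEM_METADATA *md,
				  u32 fb_base, u32 ts_base)
{
	if (!md || md->magic != VIV_VIDMEM_METADATA_MAGIC || !md->compressed) {
		/* No compression info: pass the buffer through untouched. */
		dcss_dec400d_bypass(dec400d);
		return;
	}

	dcss_dec400d_read_config(dec400d, 0, true, md->compress_format);
	dcss_dec400d_addr_set(dec400d, fb_base, ts_base);
	dcss_dec400d_fast_clear_config(dec400d, md->fc_value, md->fc_enabled);
	dcss_dec400d_enable(dec400d);
}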

@@ -0,0 +1,369 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/busfreq-imx.h>
#include <drm/drm_modeset_helper.h>
#include "dcss-dev.h"
static void dcss_clocks_enable(struct dcss_dev *dcss)
{
if (dcss->clks_on)
return;
if (dcss->hdmi_output) {
clk_prepare_enable(dcss->pll_phy_ref_clk);
clk_prepare_enable(dcss->pll_src_clk);
}
clk_prepare_enable(dcss->axi_clk);
clk_prepare_enable(dcss->apb_clk);
clk_prepare_enable(dcss->rtrm_clk);
clk_prepare_enable(dcss->dtrc_clk);
clk_prepare_enable(dcss->pix_clk);
dcss->clks_on = true;
}
static void dcss_clocks_disable(struct dcss_dev *dcss)
{
if (!dcss->clks_on)
return;
clk_disable_unprepare(dcss->pix_clk);
clk_disable_unprepare(dcss->dtrc_clk);
clk_disable_unprepare(dcss->rtrm_clk);
clk_disable_unprepare(dcss->apb_clk);
clk_disable_unprepare(dcss->axi_clk);
if (dcss->hdmi_output) {
clk_disable_unprepare(dcss->pll_src_clk);
clk_disable_unprepare(dcss->pll_phy_ref_clk);
}
dcss->clks_on = false;
}
static void dcss_busfreq_enable(struct dcss_dev *dcss)
{
if (dcss->bus_freq_on)
return;
request_bus_freq(BUS_FREQ_HIGH);
dcss->bus_freq_on = true;
}
static void dcss_busfreq_disable(struct dcss_dev *dcss)
{
if (!dcss->bus_freq_on)
return;
release_bus_freq(BUS_FREQ_HIGH);
dcss->bus_freq_on = false;
}
static int dcss_submodules_init(struct dcss_dev *dcss)
{
int ret = 0;
u32 base_addr = dcss->start_addr;
const struct dcss_type_data *devtype = dcss->devtype;
dcss_clocks_enable(dcss);
ret = dcss_blkctl_init(dcss, base_addr + devtype->blkctl_ofs);
if (ret)
return ret;
ret = dcss_ctxld_init(dcss, base_addr + devtype->ctxld_ofs);
if (ret)
goto ctxld_err;
ret = dcss_dtg_init(dcss, base_addr + devtype->dtg_ofs);
if (ret)
goto dtg_err;
ret = dcss_ss_init(dcss, base_addr + devtype->ss_ofs);
if (ret)
goto ss_err;
ret = dcss_dtrc_init(dcss, base_addr + devtype->dtrc_ofs);
if (ret)
goto dtrc_err;
ret = dcss_dpr_init(dcss, base_addr + devtype->dpr_ofs);
if (ret)
goto dpr_err;
ret = dcss_wrscl_init(dcss, base_addr + devtype->wrscl_ofs);
if (ret)
goto wrscl_err;
ret = dcss_rdsrc_init(dcss, base_addr + devtype->rdsrc_ofs);
if (ret)
goto rdsrc_err;
ret = dcss_scaler_init(dcss, base_addr + devtype->scaler_ofs);
if (ret)
goto scaler_err;
ret = dcss_dec400d_init(dcss, base_addr + devtype->dec400d_ofs);
if (ret)
goto dec400d_err;
ret = dcss_hdr10_init(dcss, base_addr + devtype->hdr10_ofs);
if (ret)
goto hdr10_err;
return 0;
hdr10_err:
dcss_dec400d_exit(dcss->dec400d);
dec400d_err:
dcss_scaler_exit(dcss->scaler);
scaler_err:
dcss_rdsrc_exit(dcss->rdsrc);
rdsrc_err:
dcss_wrscl_exit(dcss->wrscl);
wrscl_err:
dcss_dpr_exit(dcss->dpr);
dpr_err:
dcss_dtrc_exit(dcss->dtrc);
dtrc_err:
dcss_ss_exit(dcss->ss);
ss_err:
dcss_dtg_exit(dcss->dtg);
dtg_err:
dcss_ctxld_exit(dcss->ctxld);
ctxld_err:
dcss_blkctl_exit(dcss->blkctl);
dcss_clocks_disable(dcss);
return ret;
}
static void dcss_submodules_stop(struct dcss_dev *dcss)
{
dcss_clocks_enable(dcss);
dcss_hdr10_exit(dcss->hdr10);
dcss_dec400d_exit(dcss->dec400d);
dcss_scaler_exit(dcss->scaler);
dcss_rdsrc_exit(dcss->rdsrc);
dcss_wrscl_exit(dcss->wrscl);
dcss_dpr_exit(dcss->dpr);
dcss_dtrc_exit(dcss->dtrc);
dcss_ss_exit(dcss->ss);
dcss_dtg_exit(dcss->dtg);
dcss_ctxld_exit(dcss->ctxld);
dcss_blkctl_exit(dcss->blkctl);
dcss_clocks_disable(dcss);
}
static int dcss_clks_init(struct dcss_dev *dcss)
{
int i;
struct {
const char *id;
struct clk **clk;
bool required;
} clks[] = {
{"apb", &dcss->apb_clk, true},
{"axi", &dcss->axi_clk, true},
{"pix", &dcss->pix_clk, true},
{"rtrm", &dcss->rtrm_clk, true},
{"dtrc", &dcss->dtrc_clk, true},
{"pll_src", &dcss->pll_src_clk, dcss->hdmi_output},
{"pll_phy_ref", &dcss->pll_phy_ref_clk, dcss->hdmi_output},
};
for (i = 0; i < ARRAY_SIZE(clks); i++) {
*clks[i].clk = devm_clk_get(dcss->dev, clks[i].id);
if (IS_ERR(*clks[i].clk) && clks[i].required) {
dev_err(dcss->dev, "failed to get %s clock\n",
clks[i].id);
return PTR_ERR(*clks[i].clk);
}
}
dcss->clks_on = false;
return 0;
}
static void dcss_clks_release(struct dcss_dev *dcss)
{
devm_clk_put(dcss->dev, dcss->dtrc_clk);
devm_clk_put(dcss->dev, dcss->rtrm_clk);
devm_clk_put(dcss->dev, dcss->pix_clk);
devm_clk_put(dcss->dev, dcss->axi_clk);
devm_clk_put(dcss->dev, dcss->apb_clk);
}
struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
{
struct platform_device *pdev = to_platform_device(dev);
int ret;
struct resource *res;
struct dcss_dev *dcss;
const struct dcss_type_data *devtype;
devtype = of_device_get_match_data(dev);
if (!devtype) {
dev_err(dev, "no device match found\n");
return ERR_PTR(-ENODEV);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "cannot get memory resource\n");
return ERR_PTR(-EINVAL);
}
dcss = devm_kzalloc(dev, sizeof(struct dcss_dev), GFP_KERNEL);
if (!dcss)
return ERR_PTR(-ENOMEM);
dcss->dev = dev;
dcss->devtype = devtype;
dcss->hdmi_output = hdmi_output;
ret = dcss_clks_init(dcss);
if (ret) {
dev_err(dev, "clocks initialization failed\n");
goto err;
}
dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
if (!dcss->of_port) {
dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name);
ret = -ENODEV;
goto clks_err;
}
dcss->start_addr = res->start;
ret = dcss_submodules_init(dcss);
if (ret) {
dev_err(dev, "submodules initialization failed\n");
goto clks_err;
}
pm_runtime_enable(dev);
return dcss;
clks_err:
dcss_clks_release(dcss);
err:
devm_kfree(dcss->dev, dcss);
return ERR_PTR(ret);
}
void dcss_dev_destroy(struct dcss_dev *dcss)
{
pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss);
dcss_clks_release(dcss);
devm_kfree(dcss->dev, dcss);
}
#ifdef CONFIG_PM_SLEEP
int dcss_dev_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
int ret;
drm_mode_config_helper_suspend(dcss_drv_dev_to_drm(dev));
if (pm_runtime_suspended(dev))
return 0;
ret = dcss_ctxld_suspend(dcss->ctxld);
if (ret)
return ret;
dcss_clocks_disable(dcss);
dcss_busfreq_disable(dcss);
return 0;
}
int dcss_dev_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
if (pm_runtime_suspended(dev)) {
drm_mode_config_helper_resume(dcss_drv_dev_to_drm(dev));
return 0;
}
dcss_busfreq_enable(dcss);
dcss_clocks_enable(dcss);
dcss_blkctl_cfg(dcss->blkctl);
dcss_ctxld_resume(dcss->ctxld);
drm_mode_config_helper_resume(dcss_drv_dev_to_drm(dev));
return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
int dcss_dev_runtime_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
int ret;
ret = dcss_ctxld_suspend(dcss->ctxld);
if (ret)
return ret;
dcss_clocks_disable(dcss);
dcss_busfreq_disable(dcss);
return 0;
}
int dcss_dev_runtime_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
dcss_busfreq_enable(dcss);
dcss_clocks_enable(dcss);
dcss_blkctl_cfg(dcss->blkctl);
dcss_ctxld_resume(dcss->ctxld);
return 0;
}
#endif /* CONFIG_PM */
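
For reference, a sketch of how these suspend/resume helpers would typically be wired into dev_pm_ops by the platform driver (an assumption; the actual wiring sits outside the portion of the diff shown here):

static const struct dev_pm_ops example_dcss_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
	SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend,
			   dcss_dev_runtime_resume, NULL)
};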

@@ -0,0 +1,359 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 NXP.
*/
#ifndef __DCSS_PRV_H__
#define __DCSS_PRV_H__
#include <drm/drm_atomic.h>
#include <drm/drm_fourcc.h>
#include <linux/io.h>
#include <video/videomode.h>
#define SET 0x04
#define CLR 0x08
#define TGL 0x0C
#define dcss_writel(v, c) writel((v), (c))
#define dcss_readl(c) readl(c)
#define dcss_set(v, c) writel((v), (c) + SET)
#define dcss_clr(v, c) writel((v), (c) + CLR)
#define dcss_toggle(v, c) writel((v), (c) + TGL)
static inline void dcss_update(u32 v, u32 m, void __iomem *c)
{
writel((readl(c) & ~(m)) | (v), (c));
}
#define DCSS_DBG_REG(reg) {.name = #reg, .ofs = reg}
enum {
DCSS_IMX8MQ = 0,
};
struct dcss_type_data {
const char *name;
u32 blkctl_ofs;
u32 ctxld_ofs;
u32 rdsrc_ofs;
u32 wrscl_ofs;
u32 dtg_ofs;
u32 scaler_ofs;
u32 ss_ofs;
u32 dpr_ofs;
u32 dtrc_ofs;
u32 dec400d_ofs;
u32 hdr10_ofs;
};
struct dcss_debug_reg {
char *name;
u32 ofs;
};
enum dcss_ctxld_ctx_type {
CTX_DB,
CTX_SB_HP, /* high-priority */
CTX_SB_LP, /* low-priority */
};
struct dcss_dev {
struct device *dev;
const struct dcss_type_data *devtype;
struct device_node *of_port;
u32 start_addr;
struct dcss_blkctl *blkctl;
struct dcss_ctxld *ctxld;
struct dcss_dpr *dpr;
struct dcss_dtg *dtg;
struct dcss_ss *ss;
struct dcss_hdr10 *hdr10;
struct dcss_scaler *scaler;
struct dcss_dtrc *dtrc;
struct dcss_dec400d *dec400d;
struct dcss_wrscl *wrscl;
struct dcss_rdsrc *rdsrc;
struct clk *apb_clk;
struct clk *axi_clk;
struct clk *pix_clk;
struct clk *rtrm_clk;
struct clk *dtrc_clk;
struct clk *pll_src_clk;
struct clk *pll_phy_ref_clk;
void (*dcss_disable_callback)(void *data);
bool clks_on;
bool bus_freq_on;
bool hdmi_output;
};
enum dcss_color_space {
DCSS_COLORSPACE_RGB,
DCSS_COLORSPACE_YUV,
DCSS_COLORSPACE_UNKNOWN,
};
struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev);
struct drm_device *dcss_drv_dev_to_drm(struct device *dev);
struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output);
void dcss_dev_destroy(struct dcss_dev *dcss);
int dcss_dev_runtime_suspend(struct device *dev);
int dcss_dev_runtime_resume(struct device *dev);
int dcss_dev_suspend(struct device *dev);
int dcss_dev_resume(struct device *dev);
/* BLKCTL */
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
void dcss_blkctl_exit(struct dcss_blkctl *blkctl);
/* CTXLD */
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base);
void dcss_ctxld_exit(struct dcss_ctxld *ctxld);
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
u32 val, u32 reg_idx);
int dcss_ctxld_resume(struct dcss_ctxld *dcss_ctxld);
int dcss_ctxld_suspend(struct dcss_ctxld *dcss_ctxld);
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
u32 reg_ofs);
void dcss_ctxld_kick(struct dcss_ctxld *ctxld);
bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld);
int dcss_ctxld_enable(struct dcss_ctxld *ctxld);
void dcss_ctxld_register_dtg_disable_cb(struct dcss_ctxld *ctxld,
void (*cb)(void *),
void *data);
void dcss_ctxld_register_dtrc_cb(struct dcss_ctxld *ctxld,
bool (*cb)(void *),
void *data);
/* DPR */
enum dcss_tile_type {
TILE_LINEAR = 0,
TILE_GPU_STANDARD,
TILE_GPU_SUPER,
TILE_VPU_YUV420,
TILE_VPU_VP9,
};
enum dcss_pix_size {
PIX_SIZE_8,
PIX_SIZE_16,
PIX_SIZE_32,
};
int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base);
void dcss_dpr_exit(struct dcss_dpr *dpr);
void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr);
void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres);
void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
u32 chroma_base_addr, u16 pitch);
void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en);
void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
const struct drm_format_info *format, u64 modifier);
void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation);
/* DTG */
int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base);
void dcss_dtg_exit(struct dcss_dtg *dtg);
bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg);
void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en);
void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg);
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm);
void dcss_dtg_css_set(struct dcss_dtg *dtg, bool out_is_yuv);
void dcss_dtg_enable(struct dcss_dtg *dtg, bool en,
struct completion *dis_completion);
bool dcss_dtg_is_enabled(struct dcss_dtg *dtg);
void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en);
bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha);
void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
const struct drm_format_info *format, int alpha);
void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
int px, int py, int pw, int ph);
void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en);
/* SUBSAM */
int dcss_ss_init(struct dcss_dev *dcss, unsigned long subsam_base);
void dcss_ss_exit(struct dcss_ss *ss);
void dcss_ss_enable(struct dcss_ss *ss);
void dcss_ss_disable(struct dcss_ss *ss);
void dcss_ss_subsam_set(struct dcss_ss *ss, bool output_is_yuv);
void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
bool phsync, bool pvsync);
/* SCALER */
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base);
void dcss_scaler_exit(struct dcss_scaler *scl);
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
u32 vrefresh_hz);
void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en);
int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
int *min, int *max);
void dcss_scaler_write_sclctrl(struct dcss_scaler *scl);
/* DEC400D */
#define VIV_VIDMEM_METADATA_MAGIC fourcc_code('v', 'i', 'v', 'm')
/* The compressed formats are currently defined to match the DEC400D; they should be made generic. */
typedef enum _VIV_COMPRESS_FMT
{
_VIV_CFMT_ARGB8 = 0,
_VIV_CFMT_XRGB8,
_VIV_CFMT_AYUV,
_VIV_CFMT_UYVY,
_VIV_CFMT_YUY2,
_VIV_CFMT_YUV_ONLY,
_VIV_CFMT_UV_MIX,
_VIV_CFMT_ARGB4,
_VIV_CFMT_XRGB4,
_VIV_CFMT_A1R5G5B5,
_VIV_CFMT_X1R5G5B5,
_VIV_CFMT_R5G6B5,
_VIV_CFMT_Z24S8,
_VIV_CFMT_Z24,
_VIV_CFMT_Z16,
_VIV_CFMT_A2R10G10B10,
_VIV_CFMT_BAYER,
_VIV_CFMT_SIGNED_BAYER,
_VIV_CFMT_VAA16,
_VIV_CFMT_S8,
_VIV_CFMT_MAX,
} _VIV_COMPRESS_FMT;
/* Metadata for cross-device fd share with additional (ts) info. */
typedef struct _VIV_VIDMEM_METADATA
{
uint32_t magic;
int32_t ts_fd;
void * ts_dma_buf;
uint32_t fc_enabled;
uint32_t fc_value;
uint32_t fc_value_upper;
uint32_t compressed;
uint32_t compress_format;
} _VIV_VIDMEM_METADATA;
int dcss_dec400d_init(struct dcss_dev *dcss, unsigned long dec400d_base);
void dcss_dec400d_exit(struct dcss_dec400d *dec400d);
void dcss_dec400d_bypass(struct dcss_dec400d *dec400d);
void dcss_dec400d_shadow_trig(struct dcss_dec400d *dec400d);
void dcss_dec400d_enable(struct dcss_dec400d *dec400d);
void dcss_dec400d_fast_clear_config(struct dcss_dec400d *dec400d,
u32 fc_value,
bool enable);
void dcss_dec400d_read_config(struct dcss_dec400d *dec400d,
u32 read_id,
bool compress_en,
u32 compress_format);
void dcss_dec400d_addr_set(struct dcss_dec400d *dec400d, u32 baddr, u32 caddr);
/* HDR10 */
enum dcss_hdr10_nonlinearity {
NL_REC2084,
NL_REC709,
NL_BT1886,
NL_2100HLG,
NL_SRGB,
};
enum dcss_hdr10_pixel_range {
PR_LIMITED,
PR_FULL,
};
enum dcss_hdr10_gamut {
G_REC2020,
G_REC709,
G_REC601_NTSC,
G_REC601_PAL,
G_ADOBE_ARGB,
};
struct dcss_hdr10_pipe_cfg {
bool is_yuv;
enum dcss_hdr10_nonlinearity nl;
enum dcss_hdr10_pixel_range pr;
enum dcss_hdr10_gamut g;
};
int dcss_hdr10_init(struct dcss_dev *dcss, unsigned long hdr10_base);
void dcss_hdr10_exit(struct dcss_hdr10 *hdr10);
void dcss_hdr10_setup(struct dcss_hdr10 *hdr10, int ch_num,
struct dcss_hdr10_pipe_cfg *ipipe_cfg,
struct dcss_hdr10_pipe_cfg *opipe_cfg);
/* enums common to both WRSCL and RDSRC */
enum dcss_wrscl_rdsrc_psize {
PSIZE_64,
PSIZE_128,
PSIZE_256,
PSIZE_512,
PSIZE_1024,
PSIZE_2048,
PSIZE_4096,
};
enum dcss_wrscl_rdsrc_tsize {
TSIZE_64,
TSIZE_128,
TSIZE_256,
TSIZE_512,
};
enum dcss_wrscl_rdsrc_fifo_size {
FIFO_512,
FIFO_1024,
FIFO_2048,
FIFO_4096,
};
enum dcss_wrscl_rdsrc_bpp {
BPP_38, /* 38 bit unpacked components */
BPP_32_UPCONVERT,
BPP_32_10BIT_OUTPUT,
BPP_20, /* 10-bit YUV422 */
BPP_16, /* 8-bit YUV422 */
};
/* WRSCL */
int dcss_wrscl_init(struct dcss_dev *dcss, unsigned long wrscl_base);
void dcss_wrscl_exit(struct dcss_wrscl *wrscl);
u32 dcss_wrscl_setup(struct dcss_wrscl *wrscl, u32 pix_format, u32 pix_clk_hz,
u32 dst_xres, u32 dst_yres);
void dcss_wrscl_enable(struct dcss_wrscl *wrscl);
void dcss_wrscl_disable(struct dcss_wrscl *wrscl);
/* RDSRC */
int dcss_rdsrc_init(struct dcss_dev *dcss, unsigned long rdsrc_base);
void dcss_rdsrc_exit(struct dcss_rdsrc *rdsrc);
void dcss_rdsrc_setup(struct dcss_rdsrc *rdsrc, u32 pix_format, u32 dst_xres,
u32 dst_yres, u32 base_addr);
void dcss_rdsrc_enable(struct dcss_rdsrc *rdsrc);
void dcss_rdsrc_disable(struct dcss_rdsrc *rdsrc);
/* DTRC */
int dcss_dtrc_init(struct dcss_dev *dcss, unsigned long dtrc_base);
void dcss_dtrc_exit(struct dcss_dtrc *dtrc);
void dcss_dtrc_bypass(struct dcss_dtrc *dtrc, int ch_num);
void dcss_dtrc_set_format_mod(struct dcss_dtrc *dtrc, int ch_num, u64 modifier);
void dcss_dtrc_addr_set(struct dcss_dtrc *dtrc, int ch_num,
u32 p1_ba, u32 p2_ba, uint64_t dec_table_ofs);
bool dcss_dtrc_ch_running(struct dcss_dtrc *dtrc, int ch_num);
bool dcss_dtrc_is_running(struct dcss_dtrc *dtrc);
void dcss_dtrc_enable(struct dcss_dtrc *dtrc, int ch_num, bool enable);
void dcss_dtrc_set_res(struct dcss_dtrc *dtrc, int ch_num,
struct drm_plane_state *state, u32 *dtrc_w, u32 *dtrc_h);
void dcss_dtrc_switch_banks(struct dcss_dtrc *dtrc);
#endif /* __DCSS_PRV_H__ */

@@ -0,0 +1,571 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include "dcss-dev.h"
#define DCSS_DPR_SYSTEM_CTRL0 0x000
#define RUN_EN BIT(0)
#define SOFT_RESET BIT(1)
#define REPEAT_EN BIT(2)
#define SHADOW_LOAD_EN BIT(3)
#define SW_SHADOW_LOAD_SEL BIT(4)
#define BCMD2AXI_MSTR_ID_CTRL BIT(16)
#define DCSS_DPR_IRQ_MASK 0x020
#define DCSS_DPR_IRQ_MASK_STATUS 0x030
#define DCSS_DPR_IRQ_NONMASK_STATUS 0x040
#define IRQ_DPR_CTRL_DONE BIT(0)
#define IRQ_DPR_RUN BIT(1)
#define IRQ_DPR_SHADOW_LOADED BIT(2)
#define IRQ_AXI_READ_ERR BIT(3)
#define DPR2RTR_YRGB_FIFO_OVFL BIT(4)
#define DPR2RTR_UV_FIFO_OVFL BIT(5)
#define DPR2RTR_FIFO_LD_BUF_RDY_YRGB_ERR BIT(6)
#define DPR2RTR_FIFO_LD_BUF_RDY_UV_ERR BIT(7)
#define DCSS_DPR_MODE_CTRL0 0x050
#define RTR_3BUF_EN BIT(0)
#define RTR_4LINE_BUF_EN BIT(1)
#define TILE_TYPE_POS 2
#define TILE_TYPE_MASK GENMASK(4, 2)
#define YUV_EN BIT(6)
#define COMP_2PLANE_EN BIT(7)
#define PIX_SIZE_POS 8
#define PIX_SIZE_MASK GENMASK(9, 8)
#define PIX_LUMA_UV_SWAP BIT(10)
#define PIX_UV_SWAP BIT(11)
#define B_COMP_SEL_POS 12
#define B_COMP_SEL_MASK GENMASK(13, 12)
#define G_COMP_SEL_POS 14
#define G_COMP_SEL_MASK GENMASK(15, 14)
#define R_COMP_SEL_POS 16
#define R_COMP_SEL_MASK GENMASK(17, 16)
#define A_COMP_SEL_POS 18
#define A_COMP_SEL_MASK GENMASK(19, 18)
#define DCSS_DPR_FRAME_CTRL0 0x070
#define HFLIP_EN BIT(0)
#define VFLIP_EN BIT(1)
#define ROT_ENC_POS 2
#define ROT_ENC_MASK GENMASK(3, 2)
#define ROT_FLIP_ORDER_EN BIT(4)
#define PITCH_POS 16
#define PITCH_MASK GENMASK(31, 16)
#define DCSS_DPR_FRAME_1P_CTRL0 0x090
#define DCSS_DPR_FRAME_1P_PIX_X_CTRL 0x0A0
#define DCSS_DPR_FRAME_1P_PIX_Y_CTRL 0x0B0
#define DCSS_DPR_FRAME_1P_BASE_ADDR 0x0C0
#define DCSS_DPR_FRAME_2P_CTRL0 0x0E0
#define DCSS_DPR_FRAME_2P_PIX_X_CTRL 0x0F0
#define DCSS_DPR_FRAME_2P_PIX_Y_CTRL 0x100
#define DCSS_DPR_FRAME_2P_BASE_ADDR 0x110
#define DCSS_DPR_STATUS_CTRL0 0x130
#define STATUS_MUX_SEL_MASK GENMASK(2, 0)
#define STATUS_SRC_SEL_POS 16
#define STATUS_SRC_SEL_MASK GENMASK(18, 16)
#define DCSS_DPR_STATUS_CTRL1 0x140
#define DCSS_DPR_RTRAM_CTRL0 0x200
#define NUM_ROWS_ACTIVE BIT(0)
#define THRES_HIGH_POS 1
#define THRES_HIGH_MASK GENMASK(3, 1)
#define THRES_LOW_POS 4
#define THRES_LOW_MASK GENMASK(6, 4)
#define ABORT_SEL BIT(7)
struct dcss_dpr_ch {
struct dcss_dpr *dpr;
void __iomem *base_reg;
u32 base_ofs;
struct drm_format_info format;
enum dcss_pix_size pix_size;
enum dcss_tile_type tile;
bool rtram_4line_en;
bool rtram_3buf_en;
u32 frame_ctrl;
u32 mode_ctrl;
u32 sys_ctrl;
u32 rtram_ctrl;
bool sys_ctrl_chgd;
u32 pitch;
int ch_num;
int irq;
bool use_dtrc;
};
struct dcss_dpr {
struct device *dev;
struct dcss_dtrc *dtrc;
struct dcss_ctxld *ctxld;
u32 ctx_id;
struct dcss_dpr_ch ch[3];
};
static void dcss_dpr_write(struct dcss_dpr_ch *ch, u32 val, u32 ofs)
{
struct dcss_dpr *dpr = ch->dpr;
dcss_ctxld_write(dpr->ctxld, dpr->ctx_id, val, ch->base_ofs + ofs);
}
static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base)
{
struct dcss_dpr_ch *ch;
int i;
for (i = 0; i < 3; i++) {
ch = &dpr->ch[i];
ch->base_ofs = dpr_base + i * 0x1000;
ch->base_reg = devm_ioremap(dpr->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(dpr->dev, "dpr: unable to remap ch %d base\n",
i);
return -ENOMEM;
}
ch->dpr = dpr;
ch->ch_num = i;
}
return 0;
}
int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
{
struct dcss_dpr *dpr;
dpr = devm_kzalloc(dcss->dev, sizeof(struct dcss_dpr), GFP_KERNEL);
if (!dpr)
return -ENOMEM;
dcss->dpr = dpr;
dpr->dev = dcss->dev;
dpr->ctxld = dcss->ctxld;
dpr->ctx_id = CTX_SB_HP;
dpr->dtrc = dcss->dtrc;
if (dcss_dpr_ch_init_all(dpr, dpr_base)) {
int i;
for (i = 0; i < 3; i++) {
if (dpr->ch[i].base_reg)
devm_iounmap(dpr->dev, dpr->ch[i].base_reg);
}
devm_kfree(dpr->dev, dpr);
return -ENOMEM;
}
return 0;
}
void dcss_dpr_exit(struct dcss_dpr *dpr)
{
int ch_no;
/* stop DPR on all channels */
for (ch_no = 0; ch_no < 3; ch_no++) {
struct dcss_dpr_ch *ch = &dpr->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0);
if (ch->base_reg)
devm_iounmap(dpr->dev, ch->base_reg);
}
devm_kfree(dpr->dev, dpr);
}
static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide,
u32 pix_format)
{
u8 pix_in_64byte_map[3][5] = {
/* LIN, GPU_STD, GPU_SUP, VPU_YUV420, VPU_VP9 */
{ 64, 8, 8, 8, 16}, /* PIX_SIZE_8 */
{ 32, 8, 8, 8, 8}, /* PIX_SIZE_16 */
{ 16, 4, 4, 8, 8}, /* PIX_SIZE_32 */
};
u32 offset;
u32 div_64byte_mod, pix_in_64byte;
pix_in_64byte = pix_in_64byte_map[ch->pix_size][ch->tile];
if (pix_format == DRM_FORMAT_NV12_10LE40)
pix_wide = pix_wide * 10 / 8;
div_64byte_mod = pix_wide % pix_in_64byte;
offset = (div_64byte_mod == 0) ? 0 : (pix_in_64byte - div_64byte_mod);
return pix_wide + offset;
}
static u32 dcss_dpr_y_pix_high_adjust(struct dcss_dpr_ch *ch, u32 pix_high,
u32 pix_format)
{
u8 num_rows_buf = ch->rtram_4line_en ? 4 : 8;
u32 offset, pix_y_mod;
pix_y_mod = pix_high % num_rows_buf;
offset = pix_y_mod ? (num_rows_buf - pix_y_mod) : 0;
return pix_high + offset;
}
void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
u32 pix_format = ch->format.format;
u32 gap = DCSS_DPR_FRAME_2P_BASE_ADDR - DCSS_DPR_FRAME_1P_BASE_ADDR;
int plane, max_planes = 1;
u32 pix_x_wide, pix_y_high;
if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21 ||
pix_format == DRM_FORMAT_NV12_10LE40)
max_planes = 2;
for (plane = 0; plane < max_planes; plane++) {
yres = plane == 1 ? yres >> 1 : yres;
pix_x_wide = dcss_dpr_x_pix_wide_adjust(ch, xres, pix_format);
pix_y_high = dcss_dpr_y_pix_high_adjust(ch, yres, pix_format);
if (plane == 0)
ch->pitch = pix_x_wide;
dcss_dpr_write(ch, pix_x_wide,
DCSS_DPR_FRAME_1P_PIX_X_CTRL + plane * gap);
dcss_dpr_write(ch, pix_y_high,
DCSS_DPR_FRAME_1P_PIX_Y_CTRL + plane * gap);
dcss_dpr_write(ch, ch->use_dtrc ? 7 : 2,
DCSS_DPR_FRAME_1P_CTRL0 + plane * gap);
}
}
void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
u32 chroma_base_addr, u16 pitch)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
if (ch->use_dtrc) {
luma_base_addr = 0x0;
chroma_base_addr = 0x10000000;
}
if (!dcss_dtrc_ch_running(dpr->dtrc, ch_num)) {
dcss_dpr_write(ch, luma_base_addr, DCSS_DPR_FRAME_1P_BASE_ADDR);
dcss_dpr_write(ch, chroma_base_addr,
DCSS_DPR_FRAME_2P_BASE_ADDR);
}
if (ch->use_dtrc)
pitch = ch->pitch;
ch->frame_ctrl &= ~PITCH_MASK;
ch->frame_ctrl |= (((u32)pitch << PITCH_POS) & PITCH_MASK);
}
static void dcss_dpr_argb_comp_sel(struct dcss_dpr_ch *ch, int a_sel, int r_sel,
int g_sel, int b_sel)
{
u32 sel;
sel = ((a_sel << A_COMP_SEL_POS) & A_COMP_SEL_MASK) |
((r_sel << R_COMP_SEL_POS) & R_COMP_SEL_MASK) |
((g_sel << G_COMP_SEL_POS) & G_COMP_SEL_MASK) |
((b_sel << B_COMP_SEL_POS) & B_COMP_SEL_MASK);
ch->mode_ctrl &= ~(A_COMP_SEL_MASK | R_COMP_SEL_MASK |
G_COMP_SEL_MASK | B_COMP_SEL_MASK);
ch->mode_ctrl |= sel;
}
static void dcss_dpr_pix_size_set(struct dcss_dpr_ch *ch,
const struct drm_format_info *format)
{
u32 val;
switch (format->format) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV12_10LE40:
val = 0;
break;
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
val = 1;
break;
default:
val = 2;
break;
}
ch->pix_size = val;
ch->mode_ctrl &= ~PIX_SIZE_MASK;
ch->mode_ctrl |= ((val << PIX_SIZE_POS) & PIX_SIZE_MASK);
}
static void dcss_dpr_uv_swap(struct dcss_dpr_ch *ch, bool swap)
{
ch->mode_ctrl &= ~PIX_UV_SWAP;
ch->mode_ctrl |= (swap ? PIX_UV_SWAP : 0);
}
static void dcss_dpr_y_uv_swap(struct dcss_dpr_ch *ch, bool swap)
{
ch->mode_ctrl &= ~PIX_LUMA_UV_SWAP;
ch->mode_ctrl |= (swap ? PIX_LUMA_UV_SWAP : 0);
}
static void dcss_dpr_2plane_en(struct dcss_dpr_ch *ch, bool en)
{
ch->mode_ctrl &= ~COMP_2PLANE_EN;
ch->mode_ctrl |= (en ? COMP_2PLANE_EN : 0);
}
static void dcss_dpr_yuv_en(struct dcss_dpr_ch *ch, bool en)
{
ch->mode_ctrl &= ~YUV_EN;
ch->mode_ctrl |= (en ? YUV_EN : 0);
}
void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
u32 sys_ctrl;
sys_ctrl = (en ? REPEAT_EN | RUN_EN : 0);
if (en) {
dcss_dpr_write(ch, ch->mode_ctrl, DCSS_DPR_MODE_CTRL0);
dcss_dpr_write(ch, ch->frame_ctrl, DCSS_DPR_FRAME_CTRL0);
dcss_dpr_write(ch, ch->rtram_ctrl, DCSS_DPR_RTRAM_CTRL0);
}
if (ch->sys_ctrl != sys_ctrl)
ch->sys_ctrl_chgd = true;
ch->sys_ctrl = sys_ctrl;
}
struct rgb_comp_sel {
u32 drm_format;
int a_sel;
int r_sel;
int g_sel;
int b_sel;
};
static struct rgb_comp_sel comp_sel_map[] = {
{DRM_FORMAT_ARGB8888, 3, 2, 1, 0},
{DRM_FORMAT_XRGB8888, 3, 2, 1, 0},
{DRM_FORMAT_ABGR8888, 3, 0, 1, 2},
{DRM_FORMAT_XBGR8888, 3, 0, 1, 2},
{DRM_FORMAT_RGBA8888, 0, 3, 2, 1},
{DRM_FORMAT_RGBX8888, 0, 3, 2, 1},
{DRM_FORMAT_BGRA8888, 0, 1, 2, 3},
{DRM_FORMAT_BGRX8888, 0, 1, 2, 3},
};
static int to_comp_sel(u32 pix_fmt, int *a_sel, int *r_sel, int *g_sel,
int *b_sel)
{
int i;
for (i = 0; i < ARRAY_SIZE(comp_sel_map); i++) {
if (comp_sel_map[i].drm_format == pix_fmt) {
*a_sel = comp_sel_map[i].a_sel;
*r_sel = comp_sel_map[i].r_sel;
*g_sel = comp_sel_map[i].g_sel;
*b_sel = comp_sel_map[i].b_sel;
return 0;
}
}
return -1;
}
static void dcss_dpr_rtram_set(struct dcss_dpr_ch *ch, u32 pix_format)
{
u32 val, mask;
switch (pix_format) {
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV12_10LE40:
ch->rtram_3buf_en = true;
ch->rtram_4line_en = false;
break;
default:
ch->rtram_3buf_en = true;
ch->rtram_4line_en = true;
break;
}
val = (ch->rtram_4line_en ? RTR_4LINE_BUF_EN : 0);
val |= (ch->rtram_3buf_en ? RTR_3BUF_EN : 0);
mask = RTR_4LINE_BUF_EN | RTR_3BUF_EN;
ch->mode_ctrl &= ~mask;
ch->mode_ctrl |= (val & mask);
val = (ch->rtram_4line_en ? 0 : NUM_ROWS_ACTIVE);
val |= (3 << THRES_LOW_POS) & THRES_LOW_MASK;
val |= (4 << THRES_HIGH_POS) & THRES_HIGH_MASK;
mask = THRES_LOW_MASK | THRES_HIGH_MASK | NUM_ROWS_ACTIVE;
ch->rtram_ctrl &= ~mask;
ch->rtram_ctrl |= (val & mask);
}
static void dcss_dpr_setup_components(struct dcss_dpr_ch *ch,
const struct drm_format_info *format)
{
int a_sel, r_sel, g_sel, b_sel;
bool uv_swap, y_uv_swap;
switch (format->format) {
case DRM_FORMAT_YVYU:
uv_swap = true;
y_uv_swap = true;
break;
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV21:
uv_swap = true;
y_uv_swap = false;
break;
case DRM_FORMAT_YUYV:
uv_swap = false;
y_uv_swap = true;
break;
default:
uv_swap = false;
y_uv_swap = false;
break;
}
dcss_dpr_uv_swap(ch, uv_swap);
dcss_dpr_y_uv_swap(ch, y_uv_swap);
if (!format->is_yuv) {
if (!to_comp_sel(format->format, &a_sel, &r_sel,
&g_sel, &b_sel)) {
dcss_dpr_argb_comp_sel(ch, a_sel, r_sel, g_sel, b_sel);
} else {
dcss_dpr_argb_comp_sel(ch, 3, 2, 1, 0);
}
} else {
dcss_dpr_argb_comp_sel(ch, 0, 0, 0, 0);
}
}
static void dcss_dpr_tile_set(struct dcss_dpr_ch *ch, uint64_t modifier)
{
switch (ch->ch_num) {
case 0:
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
ch->tile = TILE_LINEAR;
break;
case DRM_FORMAT_MOD_VIVANTE_TILED:
ch->tile = TILE_GPU_STANDARD;
break;
case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED_FC:
ch->tile = TILE_GPU_SUPER;
break;
default:
WARN_ON(1);
break;
}
break;
case 1:
case 2:
ch->tile = TILE_LINEAR;
break;
default:
WARN_ON(1);
return;
}
ch->mode_ctrl &= ~TILE_TYPE_MASK;
ch->mode_ctrl |= ((ch->tile << TILE_TYPE_POS) & TILE_TYPE_MASK);
}
void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
const struct drm_format_info *format, u64 modifier)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
ch->format = *format;
ch->use_dtrc = ch_num && modifier != DRM_FORMAT_MOD_LINEAR;
dcss_dpr_yuv_en(ch, format->is_yuv);
dcss_dpr_pix_size_set(ch, format);
dcss_dpr_setup_components(ch, format);
dcss_dpr_2plane_en(ch, format->num_planes == 2);
dcss_dpr_rtram_set(ch, format->format);
dcss_dpr_tile_set(ch, modifier);
}
/* This function will be called from interrupt context. */
void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr)
{
int chnum;
for (chnum = 0; chnum < 3; chnum++) {
struct dcss_dpr_ch *ch = &dpr->ch[chnum];
if (ch->sys_ctrl_chgd) {
dcss_ctxld_write_irqsafe(dpr->ctxld, dpr->ctx_id,
ch->sys_ctrl,
ch->base_ofs +
DCSS_DPR_SYSTEM_CTRL0);
ch->sys_ctrl_chgd = false;
}
}
}
void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
ch->frame_ctrl &= ~(HFLIP_EN | VFLIP_EN | ROT_ENC_MASK);
ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_X ? HFLIP_EN : 0;
ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_Y ? VFLIP_EN : 0;
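/* ROT_ENC: 0 = 0 degrees, 1 = 90, 2 = 180, 3 = 270; reflection bits are independent. */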
if (rotation & DRM_MODE_ROTATE_90)
ch->frame_ctrl |= 1 << ROT_ENC_POS;
else if (rotation & DRM_MODE_ROTATE_180)
ch->frame_ctrl |= 2 << ROT_ENC_POS;
else if (rotation & DRM_MODE_ROTATE_270)
ch->frame_ctrl |= 3 << ROT_ENC_POS;
}

View File

@ -0,0 +1,187 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/component.h>
#include <drm/drm_of.h>
#include <drm/drmP.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
struct dcss_drv {
struct dcss_dev *dcss;
struct dcss_kms_dev *kms;
bool is_componentized;
};
struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev)
{
struct dcss_drv *mdrv = dev_get_drvdata(dev);
return mdrv ? mdrv->dcss : NULL;
}
struct drm_device *dcss_drv_dev_to_drm(struct device *dev)
{
struct dcss_drv *mdrv = dev_get_drvdata(dev);
return mdrv ? &mdrv->kms->base : NULL;
}
static int dcss_drv_init(struct device *dev, bool componentized)
{
struct dcss_drv *mdrv;
int err = 0;
mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL);
if (!mdrv)
return -ENOMEM;
mdrv->is_componentized = componentized;
mdrv->dcss = dcss_dev_create(dev, componentized);
if (IS_ERR(mdrv->dcss)) {
err = PTR_ERR(mdrv->dcss);
goto err;
}
dev_set_drvdata(dev, mdrv);
mdrv->kms = dcss_kms_attach(mdrv->dcss, componentized);
if (IS_ERR(mdrv->kms)) {
err = PTR_ERR(mdrv->kms);
goto dcss_shutoff;
}
return 0;
dcss_shutoff:
dcss_dev_destroy(mdrv->dcss);
dev_set_drvdata(dev, NULL);
err:
devm_kfree(dev, mdrv);
return err;
}
static void dcss_drv_deinit(struct device *dev, bool componentized)
{
struct dcss_drv *mdrv = dev_get_drvdata(dev);
if (!mdrv)
return;
dcss_kms_detach(mdrv->kms, componentized);
dcss_dev_destroy(mdrv->dcss);
dev_set_drvdata(dev, NULL);
}
static int dcss_drv_bind(struct device *dev)
{
return dcss_drv_init(dev, true);
}
static void dcss_drv_unbind(struct device *dev)
{
return dcss_drv_deinit(dev, true);
}
static const struct component_master_ops dcss_master_ops = {
.bind = dcss_drv_bind,
.unbind = dcss_drv_unbind,
};
static int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
}
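/*
 * Probe path selection: when the remote node is the NWL MIPI-DSI bridge, the
 * device is brought up directly (non-componentized); any other output (HDMI)
 * is bound through the component framework.
 */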
static int dcss_drv_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct component_match *match = NULL;
struct device_node *remote;
if (!dev->of_node)
return -ENODEV;
remote = of_graph_get_remote_node(dev->of_node, 0, 0);
if (!remote)
return -ENODEV;
if (of_device_is_compatible(remote, "fsl,imx8mq-nwl-dsi")) {
of_node_put(remote);
return dcss_drv_init(dev, false);
}
drm_of_component_match_add(dev, &match, compare_of, remote);
of_node_put(remote);
return component_master_add_with_match(dev, &dcss_master_ops, match);
}
static int dcss_drv_platform_remove(struct platform_device *pdev)
{
struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
if (mdrv->is_componentized)
component_master_del(&pdev->dev, &dcss_master_ops);
else
dcss_drv_deinit(&pdev->dev, false);
return 0;
}
static struct dcss_type_data dcss_types[] = {
[DCSS_IMX8MQ] = {
.name = "DCSS_IMX8MQ",
.blkctl_ofs = 0x2F000,
.ctxld_ofs = 0x23000,
.dtg_ofs = 0x20000,
.rdsrc_ofs = 0x22000,
.wrscl_ofs = 0x21000,
.scaler_ofs = 0x1C000,
.ss_ofs = 0x1B000,
.dpr_ofs = 0x18000,
.dec400d_ofs = 0x15000,
.hdr10_ofs = 0x00000,
.dtrc_ofs = 0x16000,
},
};
static const struct of_device_id dcss_of_match[] = {
{ .compatible = "nxp,imx8mq-dcss", .data = &dcss_types[DCSS_IMX8MQ], },
{},
};
MODULE_DEVICE_TABLE(of, dcss_of_match);
static const struct dev_pm_ops dcss_dev_pm = {
SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend,
dcss_dev_runtime_resume, NULL)
};
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
.remove = dcss_drv_platform_remove,
.driver = {
.name = "imx-dcss",
.of_match_table = dcss_of_match,
.pm = &dcss_dev_pm,
},
};
module_platform_driver(dcss_platform_driver);
MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@nxp.com>");
MODULE_DESCRIPTION("DCSS driver for i.MX8MQ");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,454 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "dcss-dev.h"
#define DCSS_DTG_TC_CONTROL_STATUS 0x00
#define CH3_EN BIT(0)
#define CH2_EN BIT(1)
#define CH1_EN BIT(2)
#define OVL_DATA_MODE BIT(3)
#define BLENDER_VIDEO_ALPHA_SEL BIT(7)
#define DTG_START BIT(8)
#define DBY_MODE_EN BIT(9)
#define CH1_ALPHA_SEL BIT(10)
#define CSS_PIX_COMP_SWAP_POS 12
#define CSS_PIX_COMP_SWAP_MASK GENMASK(14, 12)
#define DEFAULT_FG_ALPHA_POS 24
#define DEFAULT_FG_ALPHA_MASK GENMASK(31, 24)
#define DCSS_DTG_TC_DTG 0x04
#define DCSS_DTG_TC_DISP_TOP 0x08
#define DCSS_DTG_TC_DISP_BOT 0x0C
#define DCSS_DTG_TC_CH1_TOP 0x10
#define DCSS_DTG_TC_CH1_BOT 0x14
#define DCSS_DTG_TC_CH2_TOP 0x18
#define DCSS_DTG_TC_CH2_BOT 0x1C
#define DCSS_DTG_TC_CH3_TOP 0x20
#define DCSS_DTG_TC_CH3_BOT 0x24
#define TC_X_POS 0
#define TC_X_MASK GENMASK(12, 0)
#define TC_Y_POS 16
#define TC_Y_MASK GENMASK(28, 16)
#define DCSS_DTG_TC_CTXLD 0x28
#define TC_CTXLD_DB_Y_POS 0
#define TC_CTXLD_DB_Y_MASK GENMASK(12, 0)
#define TC_CTXLD_SB_Y_POS 16
#define TC_CTXLD_SB_Y_MASK GENMASK(28, 16)
#define DCSS_DTG_TC_CH1_BKRND 0x2C
#define DCSS_DTG_TC_CH2_BKRND 0x30
#define BKRND_R_Y_COMP_POS 20
#define BKRND_R_Y_COMP_MASK GENMASK(29, 20)
#define BKRND_G_U_COMP_POS 10
#define BKRND_G_U_COMP_MASK GENMASK(19, 10)
#define BKRND_B_V_COMP_POS 0
#define BKRND_B_V_COMP_MASK GENMASK(9, 0)
#define DCSS_DTG_BLENDER_DBY_RANGEINV 0x38
#define DCSS_DTG_BLENDER_DBY_RANGEMIN 0x3C
#define DCSS_DTG_BLENDER_DBY_BDP 0x40
#define DCSS_DTG_BLENDER_BKRND_I 0x44
#define DCSS_DTG_BLENDER_BKRND_P 0x48
#define DCSS_DTG_BLENDER_BKRND_T 0x4C
#define DCSS_DTG_LINE0_INT 0x50
#define DCSS_DTG_LINE1_INT 0x54
#define DCSS_DTG_BG_ALPHA_DEFAULT 0x58
#define DCSS_DTG_INT_STATUS 0x5C
#define DCSS_DTG_INT_CONTROL 0x60
#define DCSS_DTG_TC_CH3_BKRND 0x64
#define DCSS_DTG_INT_MASK 0x68
#define LINE0_IRQ BIT(0)
#define LINE1_IRQ BIT(1)
#define LINE2_IRQ BIT(2)
#define LINE3_IRQ BIT(3)
#define DCSS_DTG_LINE2_INT 0x6C
#define DCSS_DTG_LINE3_INT 0x70
#define DCSS_DTG_DBY_OL 0x74
#define DCSS_DTG_DBY_BL 0x78
#define DCSS_DTG_DBY_EL 0x7C
struct dcss_dtg {
struct device *dev;
struct dcss_ctxld *ctxld;
void __iomem *base_reg;
u32 base_ofs;
u32 ctx_id;
bool in_use;
bool hdmi_output;
u32 dis_ulc_x;
u32 dis_ulc_y;
u32 control_status;
u32 alpha;
int ctxld_kick_irq;
bool ctxld_kick_irq_en;
struct clk *pix_clk;
struct clk *pll_src_clk;
struct clk *pll_phy_ref_clk;
/*
 * Passed in by the DRM CRTC so we can signal when the DTG has actually
 * stopped. Modesetting while the DTG is still running may result in
 * unpredictable behavior.
 */
struct completion *dis_completion;
};
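/*
 * While the DTG is not yet running, the register is also written directly;
 * in either case the value is staged in the CTXLD context so it is
 * (re)applied on the next context load.
 */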
static void dcss_dtg_write(struct dcss_dtg *dtg, u32 val, u32 ofs)
{
if (!dtg->in_use)
dcss_writel(val, dtg->base_reg + ofs);
dcss_ctxld_write(dtg->ctxld, dtg->ctx_id, val, dtg->base_ofs + ofs);
}
static irqreturn_t dcss_dtg_irq_handler(int irq, void *data)
{
struct dcss_dtg *dtg = data;
u32 status;
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
if (!(status & LINE0_IRQ))
return IRQ_HANDLED;
dcss_ctxld_kick(dtg->ctxld);
dcss_writel(status & LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
return IRQ_HANDLED;
}
static int dcss_dtg_irq_config(struct dcss_dtg *dtg,
struct platform_device *pdev)
{
int ret;
dtg->ctxld_kick_irq = platform_get_irq_byname(pdev, "ctxld_kick");
if (dtg->ctxld_kick_irq < 0) {
dev_err(dtg->dev, "dtg: can't get line2 irq number\n");
return dtg->ctxld_kick_irq;
}
ret = devm_request_irq(dtg->dev, dtg->ctxld_kick_irq,
dcss_dtg_irq_handler,
IRQF_TRIGGER_HIGH,
"dcss_ctxld_kick", dtg);
if (ret) {
dev_err(dtg->dev, "dtg: irq request failed.\n");
return ret;
}
disable_irq(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = false;
dcss_update(LINE0_IRQ, LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
return 0;
}
int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
{
int ret = 0;
struct dcss_dtg *dtg;
dtg = devm_kzalloc(dcss->dev, sizeof(*dtg), GFP_KERNEL);
if (!dtg)
return -ENOMEM;
dcss->dtg = dtg;
dtg->dev = dcss->dev;
dtg->ctxld = dcss->ctxld;
dtg->hdmi_output = dcss->hdmi_output;
dtg->base_reg = devm_ioremap(dcss->dev, dtg_base, SZ_4K);
if (!dtg->base_reg) {
dev_err(dcss->dev, "dtg: unable to remap dtg base\n");
ret = -ENOMEM;
goto err_ioremap;
}
dtg->base_ofs = dtg_base;
dtg->ctx_id = CTX_DB;
dtg->pix_clk = dcss->pix_clk;
dtg->pll_src_clk = dcss->pll_src_clk;
dtg->pll_phy_ref_clk = dcss->pll_phy_ref_clk;
dtg->alpha = 255;
dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL |
((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK);
ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev));
if (ret)
goto err_irq;
return 0;
err_irq:
devm_iounmap(dtg->dev, dtg->base_reg);
err_ioremap:
devm_kfree(dtg->dev, dtg);
return ret;
}
void dcss_dtg_exit(struct dcss_dtg *dtg)
{
/* stop DTG */
dcss_writel(DTG_START, dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS);
devm_free_irq(dtg->dev, dtg->ctxld_kick_irq, dtg);
if (dtg->base_reg)
devm_iounmap(dtg->dev, dtg->base_reg);
devm_kfree(dtg->dev, dtg);
}
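/*
 * Program the DTG timing from the videomode: dtg_lrc_x/y is the last
 * pixel/line of the full frame, while dis_ulc_x/y and dis_lrc_x/y bound the
 * active display area. For HDMI output, the PLL source clock is also
 * reparented to the HDMI PHY reference before the pixel clock rate is set.
 */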
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm)
{
u16 dtg_lrc_x, dtg_lrc_y;
u16 dis_ulc_x, dis_ulc_y;
u16 dis_lrc_x, dis_lrc_y;
u32 sb_ctxld_trig, db_ctxld_trig;
u32 pixclock = vm->pixelclock;
u32 actual_clk;
dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
vm->vactive - 1;
dis_ulc_x = vm->hsync_len + vm->hback_porch - 1;
dis_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch - 1;
dis_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
dis_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
vm->vactive - 1;
clk_disable_unprepare(dtg->pix_clk);
if (dtg->hdmi_output) {
int err;
clk_disable_unprepare(dtg->pll_src_clk);
err = clk_set_parent(dtg->pll_src_clk, dtg->pll_phy_ref_clk);
if (err < 0)
dev_warn(dtg->dev, "clk_set_parent() returned %d\n", err);
clk_prepare_enable(dtg->pll_src_clk);
}
clk_set_rate(dtg->pix_clk, vm->pixelclock);
clk_prepare_enable(dtg->pix_clk);
actual_clk = clk_get_rate(dtg->pix_clk);
if (pixclock != actual_clk) {
dev_info(dtg->dev,
"Pixel clock set to %u kHz instead of %u kHz.\n",
(actual_clk / 1000), (pixclock / 1000));
}
msleep(50);
dcss_dtg_write(dtg, ((dtg_lrc_y << TC_Y_POS) | dtg_lrc_x),
DCSS_DTG_TC_DTG);
dcss_dtg_write(dtg, ((dis_ulc_y << TC_Y_POS) | dis_ulc_x),
DCSS_DTG_TC_DISP_TOP);
dcss_dtg_write(dtg, ((dis_lrc_y << TC_Y_POS) | dis_lrc_x),
DCSS_DTG_TC_DISP_BOT);
dtg->dis_ulc_x = dis_ulc_x;
dtg->dis_ulc_y = dis_ulc_y;
sb_ctxld_trig = ((0 * dis_lrc_y / 100) << TC_CTXLD_SB_Y_POS) &
TC_CTXLD_SB_Y_MASK;
db_ctxld_trig = ((99 * dis_lrc_y / 100) << TC_CTXLD_DB_Y_POS) &
TC_CTXLD_DB_Y_MASK;
dcss_dtg_write(dtg, sb_ctxld_trig | db_ctxld_trig, DCSS_DTG_TC_CTXLD);
/* vblank trigger */
dcss_dtg_write(dtg, 0, DCSS_DTG_LINE1_INT);
/* CTXLD trigger */
dcss_dtg_write(dtg, ((90 * dis_lrc_y) / 100) << 16, DCSS_DTG_LINE0_INT);
}
void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
int px, int py, int pw, int ph)
{
u16 p_ulc_x, p_ulc_y;
u16 p_lrc_x, p_lrc_y;
p_ulc_x = dtg->dis_ulc_x + px;
p_ulc_y = dtg->dis_ulc_y + py;
p_lrc_x = p_ulc_x + pw;
p_lrc_y = p_ulc_y + ph;
if (!px && !py && !pw && !ph) {
dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
} else {
dcss_dtg_write(dtg, ((p_ulc_y << TC_Y_POS) | p_ulc_x),
DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
dcss_dtg_write(dtg, ((p_lrc_y << TC_Y_POS) | p_lrc_x),
DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
}
}
bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha)
{
if (ch_num)
return false;
return alpha != dtg->alpha;
}
void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
const struct drm_format_info *format, int alpha)
{
u32 alpha_val;
/* we care about alpha only when channel 0 is concerned */
if (ch_num)
return;
alpha_val = (alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK;
/*
* Use global alpha if pixel format does not have alpha channel or the
* user explicitly chose to use global alpha (i.e. alpha is not OPAQUE).
*/
if (!format->has_alpha || alpha != 255) {
dtg->control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
dtg->control_status |= alpha_val;
} else { /* use per-pixel alpha otherwise */
dtg->control_status |= CH1_ALPHA_SEL;
}
dtg->alpha = alpha;
}
void dcss_dtg_css_set(struct dcss_dtg *dtg, bool out_is_yuv)
{
dtg->control_status &= ~CSS_PIX_COMP_SWAP_MASK;
if (out_is_yuv)
return;
dtg->control_status |=
(0x5 << CSS_PIX_COMP_SWAP_POS) & CSS_PIX_COMP_SWAP_MASK;
}
static void dcss_dtg_disable_callback(void *data)
{
struct dcss_dtg *dtg = data;
dtg->control_status &= ~DTG_START;
dcss_writel(dtg->control_status,
dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS);
dtg->in_use = false;
complete(dtg->dis_completion);
}
void dcss_dtg_enable(struct dcss_dtg *dtg, bool en,
struct completion *dis_completion)
{
if (!en) {
dtg->dis_completion = dis_completion;
dcss_ctxld_register_dtg_disable_cb(dtg->ctxld,
dcss_dtg_disable_callback,
dtg);
return;
}
dtg->dis_completion = NULL;
dtg->control_status |= DTG_START;
dcss_dtg_write(dtg, dtg->control_status, DCSS_DTG_TC_CONTROL_STATUS);
dtg->in_use = true;
}
bool dcss_dtg_is_enabled(struct dcss_dtg *dtg)
{
return dtg->in_use;
}
void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en)
{
u32 ch_en_map[] = {CH1_EN, CH2_EN, CH3_EN};
u32 control_status;
control_status = dtg->control_status & ~ch_en_map[ch_num];
control_status |= en ? ch_en_map[ch_num] : 0;
if (dtg->control_status != control_status)
dcss_dtg_write(dtg, control_status, DCSS_DTG_TC_CONTROL_STATUS);
dtg->control_status = control_status;
}
void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en)
{
u32 status;
dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
if (en) {
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
dcss_writel(status & LINE1_IRQ,
dtg->base_reg + DCSS_DTG_INT_CONTROL);
}
}
void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en)
{
u32 status;
if (en) {
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
if (!dtg->ctxld_kick_irq_en) {
dcss_writel(status & LINE0_IRQ,
dtg->base_reg + DCSS_DTG_INT_CONTROL);
enable_irq(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = true;
return;
}
return;
}
if (!dtg->ctxld_kick_irq_en)
return;
disable_irq_nosync(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = false;
}
void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg)
{
dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
}
bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg)
{
return !!(dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS) & LINE1_IRQ);
}

View File

@ -0,0 +1,514 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include "dcss-dev.h"
#define DTRC_F0_OFS 0x00
#define DTRC_F1_OFS 0x60
#define DCSS_DTRC_DYDSADDR 0x00
#define DCSS_DTRC_DCDSADDR 0x04
#define DCSS_DTRC_DYTSADDR 0x08
#define DCSS_DTRC_DCTSADDR 0x0C
#define DCSS_DTRC_SIZE 0x10
#define FRAME_WIDTH_POS 0
#define FRAME_WIDTH_MASK GENMASK(9, 0)
#define FRAME_HEIGHT_POS 16
#define FRAME_HEIGHT_MASK GENMASK(25, 16)
#define DCSS_DTRC_SYSSA 0x14
#define DCSS_DTRC_SYSEA 0x18
#define DCSS_DTRC_SUVSSA 0x1C
#define DCSS_DTRC_SUVSEA 0x20
#define DCSS_DTRC_CROPORIG 0x24
#define DCSS_DTRC_CROPSIZE 0x28
#define CROP_HEIGHT_POS 16
#define CROP_HEIGHT_MASK GENMASK(28, 16)
#define CROP_WIDTH_POS 0
#define CROP_WIDTH_MASK GENMASK(12, 0)
#define DCSS_DTRC_DCTL 0x2C
#define CROPPING_EN BIT(18)
#define COMPRESSION_DIS BIT(17)
#define PIX_DEPTH_8BIT_EN BIT(1)
#define CONFIG_READY BIT(0)
#define DCSS_DTRC_DYDSADDR_EXT 0x30
#define DCSS_DTRC_DCDSADDR_EXT 0x34
#define DCSS_DTRC_DYTSADDR_EXT 0x38
#define DCSS_DTRC_DCTSADDR_EXT 0x3C
#define DCSS_DTRC_SYSSA_EXT 0x40
#define DCSS_DTRC_SYSEA_EXT 0x44
#define DCSS_DTRC_SUVSSA_EXT 0x48
#define DCSS_DTRC_SUVSEA_EXT 0x4C
#define DCSS_DTRC_INTEN 0xC0
#define DCSS_DTRC_FDINTR 0xC4
#define DCSS_DTRC_DTCTRL 0xC8
#define CURRENT_FRAME BIT(31)
#define ADDRESS_ID_ENABLE BIT(30)
#define ENDIANNESS_10BIT BIT(29)
#define MERGE_ARID_ENABLE BIT(28)
#define NON_G1_2_SWAP_MODE_POS 24
#define NON_G1_2_SWAP_MODE_MASK GENMASK(27, 24)
#define TABLE_DATA_SWAP_POS 20
#define TABLE_DATA_SWAP_MASK GENMASK(23, 20)
#define TILED_SWAP_POS 16
#define TILED_SWAP_MASK GENMASK(19, 16)
#define RASTER_SWAP_POS 12
#define RASTER_SWAP_MASK GENMASK(15, 12)
#define BURST_LENGTH_POS 4
#define BURST_LENGTH_MASK GENMASK(11, 4)
#define G1_TILED_DATA_EN BIT(3)
#define HOT_RESET BIT(2)
#define ARIDR_MODE_DETILE 0
#define ARIDR_MODE_BYPASS 2
#define DCSS_DTRC_ARIDR 0xCC
#define DCSS_DTRC_DTID2DDR 0xD0
#define DCSS_DTRC_CONFIG 0xD4
#define DCSS_DTRC_VER 0xD8
#define DCSS_DTRC_PFCTRL 0xF0
#define DCSS_DTRC_PFCR 0xF4
#define DCSS_DTRC_TOCR 0xF8
struct dcss_dtrc_ch {
struct dcss_dtrc *dtrc;
void __iomem *base_reg;
u32 base_ofs;
u32 xres;
u32 yres;
u32 pix_format;
u64 format_modifier;
u32 y_dec_ofs;
u32 uv_dec_ofs;
int curr_frame;
u32 dctl;
bool bypass;
bool running;
int irq;
int ch_num;
};
struct dcss_dtrc {
struct device *dev;
struct dcss_dtrc_ch ch[2];
u32 ctx_id;
struct dcss_ctxld *ctxld;
};
static irqreturn_t dcss_dtrc_irq_handler(int irq, void *data)
{
struct dcss_dtrc_ch *ch = data;
u32 b0, b1, curr_bank;
b0 = dcss_readl(ch->base_reg + DCSS_DTRC_DCTL) & 0x1;
b1 = dcss_readl(ch->base_reg + DTRC_F1_OFS + DCSS_DTRC_DCTL) & 0x1;
curr_bank = dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) >> 31;
dcss_update(1, 1, ch->base_reg + DCSS_DTRC_FDINTR);
return IRQ_HANDLED;
}
static int dcss_dtrc_irq_config(struct dcss_dtrc *dtrc, int ch_num)
{
struct platform_device *pdev = to_platform_device(dtrc->dev);
struct dcss_dtrc_ch *ch = &dtrc->ch[ch_num];
char irq_name[20];
int ret;
snprintf(irq_name, sizeof(irq_name), "dtrc_ch%d", ch_num + 1);
ch->irq = platform_get_irq_byname(pdev, irq_name);
if (ch->irq < 0) {
dev_err(dtrc->dev, "dtrc: can't get DTRC irq\n");
return ch->irq;
}
ret = devm_request_irq(dtrc->dev, ch->irq,
dcss_dtrc_irq_handler,
IRQF_TRIGGER_HIGH,
"dcss-dtrc", ch);
if (ret) {
dev_err(dtrc->dev, "dtrc: irq request failed.\n");
return ret;
}
dcss_writel(1, ch->base_reg + DCSS_DTRC_INTEN);
return 0;
}
static int dcss_dtrc_ch_init_all(struct dcss_dtrc *dtrc, u32 dtrc_base)
{
struct dcss_dtrc_ch *ch;
int i, ret;
for (i = 0; i < 2; i++) {
ch = &dtrc->ch[i];
ch->base_ofs = dtrc_base + i * 0x1000;
ch->base_reg = devm_ioremap(dtrc->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(dtrc->dev, "dtrc: unable to remap ch base\n");
return -ENOMEM;
}
ch->ch_num = i;
ch->dtrc = dtrc;
ret = dcss_dtrc_irq_config(dtrc, i);
if (ret)
return ret;
}
return 0;
}
static void dcss_dtrc_write(struct dcss_dtrc_ch *ch, u32 val, u32 ofs)
{
dcss_ctxld_write(ch->dtrc->ctxld, ch->dtrc->ctx_id,
val, ch->base_ofs + ofs);
}
static void dcss_dtrc_write_irqsafe(struct dcss_dtrc_ch *ch, u32 val, u32 ofs)
{
dcss_ctxld_write_irqsafe(ch->dtrc->ctxld, ch->dtrc->ctx_id,
val, ch->base_ofs + ofs);
}
int dcss_dtrc_init(struct dcss_dev *dcss, unsigned long dtrc_base)
{
struct dcss_dtrc *dtrc;
dtrc = devm_kzalloc(dcss->dev, sizeof(*dtrc), GFP_KERNEL);
if (!dtrc)
return -ENOMEM;
dcss->dtrc = dtrc;
dtrc->dev = dcss->dev;
dtrc->ctxld = dcss->ctxld;
dtrc->ctx_id = CTX_SB_HP;
if (dcss_dtrc_ch_init_all(dtrc, dtrc_base)) {
struct dcss_dtrc_ch *ch;
int i;
for (i = 0; i < 2; i++) {
ch = &dtrc->ch[i];
if (ch->irq)
devm_free_irq(dtrc->dev, ch->irq, ch);
if (ch->base_reg)
devm_iounmap(dtrc->dev, ch->base_reg);
}
devm_kfree(dtrc->dev, dtrc);
return -ENOMEM;
}
return 0;
}
void dcss_dtrc_exit(struct dcss_dtrc *dtrc)
{
int ch_no;
for (ch_no = 0; ch_no < 2; ch_no++) {
struct dcss_dtrc_ch *ch = &dtrc->ch[ch_no];
if (ch->base_reg) {
/* reset the module to default */
dcss_writel(HOT_RESET,
ch->base_reg + DCSS_DTRC_DTCTRL);
devm_iounmap(dtrc->dev, ch->base_reg);
}
}
devm_kfree(dtrc->dev, dtrc);
}
void dcss_dtrc_bypass(struct dcss_dtrc *dtrc, int ch_num)
{
struct dcss_dtrc_ch *ch;
if (ch_num == 0)
return;
ch = &dtrc->ch[ch_num - 1];
if (ch->bypass)
return;
dcss_dtrc_write(ch, ARIDR_MODE_BYPASS, DCSS_DTRC_DTCTRL);
dcss_dtrc_write(ch, 0, DCSS_DTRC_DYTSADDR);
dcss_dtrc_write(ch, 0, DCSS_DTRC_DCTSADDR);
dcss_dtrc_write(ch, 0x0f0e0100, DCSS_DTRC_ARIDR);
dcss_dtrc_write(ch, 0x0f0e, DCSS_DTRC_DTID2DDR);
ch->bypass = true;
}
void dcss_dtrc_addr_set(struct dcss_dtrc *dtrc, int ch_num,
u32 p1_ba, u32 p2_ba, uint64_t dec_table_ofs)
{
struct dcss_dtrc_ch *ch;
if (ch_num == 0)
return;
ch = &dtrc->ch[ch_num - 1];
dcss_dtrc_write(ch, p1_ba, DCSS_DTRC_DYDSADDR);
dcss_dtrc_write(ch, p2_ba, DCSS_DTRC_DCDSADDR);
dcss_dtrc_write(ch, p1_ba, DTRC_F1_OFS + DCSS_DTRC_DYDSADDR);
dcss_dtrc_write(ch, p2_ba, DTRC_F1_OFS + DCSS_DTRC_DCDSADDR);
if (ch->format_modifier == DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED) {
ch->y_dec_ofs = dec_table_ofs & 0xFFFFFFFF;
ch->uv_dec_ofs = dec_table_ofs >> 32;
dcss_dtrc_write(ch, p1_ba + ch->y_dec_ofs,
DCSS_DTRC_DYTSADDR);
dcss_dtrc_write(ch, p1_ba + ch->uv_dec_ofs,
DCSS_DTRC_DCTSADDR);
dcss_dtrc_write(ch, p1_ba + ch->y_dec_ofs,
DTRC_F1_OFS + DCSS_DTRC_DYTSADDR);
dcss_dtrc_write(ch, p1_ba + ch->uv_dec_ofs,
DTRC_F1_OFS + DCSS_DTRC_DCTSADDR);
}
ch->bypass = false;
}
void dcss_dtrc_set_res(struct dcss_dtrc *dtrc, int ch_num,
struct drm_plane_state *state, u32 *dtrc_w, u32 *dtrc_h)
{
struct drm_framebuffer *fb = state->fb;
u32 pixel_format = fb->format->format;
struct dcss_dtrc_ch *ch;
u32 frame_height, frame_width;
u32 crop_w, crop_h, crop_orig_w, crop_orig_h;
int bank;
u32 old_xres, old_yres, xres, yres;
u32 x1, y1, x2, y2;
u32 pix_depth;
u16 width_align = 0;
if (ch_num == 0)
return;
ch = &dtrc->ch[ch_num - 1];
bank = dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) >> 31;
ch->pix_format = pixel_format;
ch->format_modifier = fb->modifier;
pix_depth = ch->pix_format == DRM_FORMAT_NV12_10LE40 ? 10 : 8;
old_xres = state->src_w >> 16;
old_yres = state->src_h >> 16;
x1 = (state->src.x1 >> 16) & ~1;
y1 = (state->src.y1 >> 16) & ~1;
x2 = state->src.x2 >> 16;
y2 = state->src.y2 >> 16;
xres = x2 - x1;
yres = y2 - y1;
frame_height = ((old_yres >> 3) << FRAME_HEIGHT_POS) & FRAME_HEIGHT_MASK;
frame_width = ((old_xres >> 3) << FRAME_WIDTH_POS) & FRAME_WIDTH_MASK;
dcss_dtrc_write(ch, frame_height | frame_width,
DTRC_F1_OFS * bank + DCSS_DTRC_SIZE);
dcss_dtrc_write(ch, frame_height | frame_width,
DTRC_F1_OFS * (bank ^ 1) + DCSS_DTRC_SIZE);
/*
 * The original image size is aligned:
 * - to 128 pixels for width (8-bit) or 256 (10-bit);
 * - to 16 lines for height (the 0xf masks below);
 */
width_align = ch->pix_format == DRM_FORMAT_NV12_10LE40 ? 0xff : 0x7f;
if (xres == old_xres && !(xres & width_align) &&
yres == old_yres && !(yres & 0xf)) {
ch->dctl &= ~CROPPING_EN;
goto exit;
}
/* align the image size: down align for compressed formats */
if (ch->format_modifier == DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED && x1)
xres = xres & ~width_align;
else
xres = (xres + width_align) & ~width_align;
if (ch->format_modifier == DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED && y1)
yres = yres & ~0xf;
else
yres = (yres + 0xf) & ~0xf;
crop_orig_w = (x1 << CROP_WIDTH_POS) & CROP_WIDTH_MASK;
crop_orig_h = (y1 << CROP_HEIGHT_POS) & CROP_HEIGHT_MASK;
dcss_dtrc_write(ch, crop_orig_w | crop_orig_h,
DCSS_DTRC_CROPORIG);
dcss_dtrc_write(ch, crop_orig_w | crop_orig_h,
DTRC_F1_OFS + DCSS_DTRC_CROPORIG);
crop_w = (xres << CROP_WIDTH_POS) & CROP_WIDTH_MASK;
crop_h = (yres << CROP_HEIGHT_POS) & CROP_HEIGHT_MASK;
dcss_dtrc_write(ch, crop_w | crop_h,
DTRC_F1_OFS * bank + DCSS_DTRC_CROPSIZE);
dcss_dtrc_write(ch, crop_w | crop_h,
DTRC_F1_OFS * (bank ^ 1) + DCSS_DTRC_CROPSIZE);
ch->dctl |= CROPPING_EN;
exit:
dcss_dtrc_write(ch, xres * yres * pix_depth / 8,
DCSS_DTRC_SYSEA);
dcss_dtrc_write(ch, xres * yres * pix_depth / 8,
DTRC_F1_OFS + DCSS_DTRC_SYSEA);
dcss_dtrc_write(ch, 0x10000000 + xres * yres * pix_depth / 8 / 2,
DCSS_DTRC_SUVSEA);
dcss_dtrc_write(ch, 0x10000000 + xres * yres * pix_depth / 8 / 2,
DTRC_F1_OFS + DCSS_DTRC_SUVSEA);
*dtrc_w = xres;
*dtrc_h = yres;
if (ch->running)
return;
dcss_dtrc_write(ch, 0x0, DCSS_DTRC_SYSSA);
dcss_dtrc_write(ch, 0x0, DTRC_F1_OFS + DCSS_DTRC_SYSSA);
dcss_dtrc_write(ch, 0x10000000, DCSS_DTRC_SUVSSA);
dcss_dtrc_write(ch, 0x10000000, DTRC_F1_OFS + DCSS_DTRC_SUVSSA);
}
void dcss_dtrc_enable(struct dcss_dtrc *dtrc, int ch_num, bool enable)
{
struct dcss_dtrc_ch *ch;
int curr_frame;
u32 fdctl, dtctrl;
if (ch_num == 0)
return;
ch = &dtrc->ch[ch_num - 1];
if (ch->bypass)
return;
if (!enable) {
ch->running = false;
return;
}
if (ch->running)
return;
dcss_update(HOT_RESET, HOT_RESET, ch->base_reg + DCSS_DTRC_DTCTRL);
while (dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) & HOT_RESET)
usleep_range(100, 200);
dcss_dtrc_write(ch, 0x0f0e0100,
DCSS_DTRC_ARIDR);
dcss_dtrc_write(ch, 0x0f0e,
DCSS_DTRC_DTID2DDR);
dtctrl = ADDRESS_ID_ENABLE | MERGE_ARID_ENABLE |
((0xF << TABLE_DATA_SWAP_POS) & TABLE_DATA_SWAP_MASK) |
((0x10 << BURST_LENGTH_POS) & BURST_LENGTH_MASK);
if (ch->format_modifier == DRM_FORMAT_MOD_VSI_G1_TILED)
dtctrl |= G1_TILED_DATA_EN;
dcss_dtrc_write(ch, dtctrl, DCSS_DTRC_DTCTRL);
curr_frame = dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) >> 31;
fdctl = ch->dctl & ~(PIX_DEPTH_8BIT_EN | COMPRESSION_DIS);
fdctl |= ch->pix_format == DRM_FORMAT_NV12_10LE40 ? 0 : PIX_DEPTH_8BIT_EN;
if (ch->format_modifier != DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED)
fdctl |= COMPRESSION_DIS;
dcss_dtrc_write(ch, fdctl,
(curr_frame ^ 1) * DTRC_F1_OFS + DCSS_DTRC_DCTL);
dcss_dtrc_write(ch, fdctl | CONFIG_READY,
curr_frame * DTRC_F1_OFS + DCSS_DTRC_DCTL);
ch->curr_frame = curr_frame;
ch->dctl = fdctl;
ch->running = true;
}
bool dcss_dtrc_ch_running(struct dcss_dtrc *dtrc, int ch_num)
{
struct dcss_dtrc_ch *ch;
if (ch_num == 0)
return false;
ch = &dtrc->ch[ch_num - 1];
return ch->running;
}
bool dcss_dtrc_is_running(struct dcss_dtrc *dtrc)
{
return dtrc->ch[0].running || dtrc->ch[1].running;
}
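/*
 * The DTRC double-buffers its per-frame configuration (F0/F1). When banks are
 * switched, re-arm the bank that is not currently being scanned out by setting
 * CONFIG_READY in that bank's DCTL copy, via the irqsafe CTXLD write path.
 */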
static void dcss_dtrc_ch_switch_banks(struct dcss_dtrc *dtrc, int dtrc_ch)
{
struct dcss_dtrc_ch *ch = &dtrc->ch[dtrc_ch];
u32 b0, b1;
if (!ch->running)
return;
b0 = dcss_readl(ch->base_reg + DCSS_DTRC_DCTL) & 0x1;
b1 = dcss_readl(ch->base_reg + DTRC_F1_OFS + DCSS_DTRC_DCTL) & 0x1;
ch->curr_frame = dcss_readl(ch->base_reg + DCSS_DTRC_DTCTRL) >> 31;
dcss_dtrc_write_irqsafe(ch, ch->dctl | CONFIG_READY,
(ch->curr_frame ^ 1) * DTRC_F1_OFS + DCSS_DTRC_DCTL);
}
void dcss_dtrc_switch_banks(struct dcss_dtrc *dtrc)
{
dcss_dtrc_ch_switch_banks(dtrc, 0);
dcss_dtrc_ch_switch_banks(dtrc, 1);
}

File diff suppressed because it is too large

View File

@ -0,0 +1,585 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <drm/drm_fourcc.h>
#include "dcss-dev.h"
#define USE_TBL_HEADER
#ifdef USE_TBL_HEADER
#include "dcss-hdr10-tables.h"
#endif
#define USE_CTXLD
#define DCSS_HDR10_A0_LUT 0x0000
#define DCSS_HDR10_A1_LUT 0x1000
#define DCSS_HDR10_A2_LUT 0x2000
/* one CSCA and CSCB for each channel(pipe) */
#define DCSS_HDR10_CSCA_BASE 0x3000
#define DCSS_HDR10_CSCB_BASE 0x3800
/* one CSCO for all channels(pipes) */
#define DCSS_HDR10_CSCO_BASE 0x3000
#define DCSS_HDR10_LUT_CONTROL (DCSS_HDR10_CSCA_BASE + 0x80)
#define LUT_ENABLE BIT(0)
#define LUT_EN_FOR_ALL_PELS BIT(1)
#define LUT_BYPASS BIT(15)
#define DCSS_HDR10_FL2FX (DCSS_HDR10_CSCB_BASE + 0x74)
#define DCSS_HDR10_LTNL (DCSS_HDR10_CSCO_BASE + 0x74)
#define LTNL_PASS_THRU BIT(0)
#define FIX2FLT_DISABLE BIT(1)
#define LTNL_EN_FOR_ALL_PELS BIT(2)
#define FIX2FLT_EN_FOR_ALL_PELS BIT(3)
/* following offsets are relative to CSC(A|B|O)_BASE */
#define DCSS_HDR10_CSC_CONTROL 0x00
#define CSC_EN BIT(0)
#define CSC_ALL_PIX_EN BIT(1)
#define CSC_BYPASS BIT(15)
#define DCSS_HDR10_CSC_H00 0x04
#define DCSS_HDR10_CSC_H10 0x08
#define DCSS_HDR10_CSC_H20 0x0C
#define DCSS_HDR10_CSC_H01 0x10
#define DCSS_HDR10_CSC_H11 0x14
#define DCSS_HDR10_CSC_H21 0x18
#define DCSS_HDR10_CSC_H02 0x1C
#define DCSS_HDR10_CSC_H12 0x20
#define DCSS_HDR10_CSC_H22 0x24
#define H_COEF_MASK GENMASK(15, 0)
#define DCSS_HDR10_CSC_IO0 0x28
#define DCSS_HDR10_CSC_IO1 0x2C
#define DCSS_HDR10_CSC_IO2 0x30
#define PRE_OFFSET_MASK GENMASK(9, 0)
#define DCSS_HDR10_CSC_IO_MIN0 0x34
#define DCSS_HDR10_CSC_IO_MIN1 0x38
#define DCSS_HDR10_CSC_IO_MIN2 0x3C
#define DCSS_HDR10_CSC_IO_MAX0 0x40
#define DCSS_HDR10_CSC_IO_MAX1 0x44
#define DCSS_HDR10_CSC_IO_MAX2 0x48
#define IO_CLIP_MASK GENMASK(9, 0)
#define DCSS_HDR10_CSC_NORM 0x4C
#define NORM_MASK GENMASK(4, 0)
#define DCSS_HDR10_CSC_OO0 0x50
#define DCSS_HDR10_CSC_OO1 0x54
#define DCSS_HDR10_CSC_OO2 0x58
#define POST_OFFSET_MASK GENMASK(27, 0)
#define DCSS_HDR10_CSC_OMIN0 0x5C
#define DCSS_HDR10_CSC_OMIN1 0x60
#define DCSS_HDR10_CSC_OMIN2 0x64
#define DCSS_HDR10_CSC_OMAX0 0x68
#define DCSS_HDR10_CSC_OMAX1 0x6C
#define DCSS_HDR10_CSC_OMAX2 0x70
#define POST_CLIP_MASK GENMASK(9, 0)
#define HDR10_IPIPE_LUT_MAX_ENTRIES 1024
#define HDR10_OPIPE_LUT_MAX_ENTRIES 1023
#define HDR10_CSC_MAX_REGS 29
#define OPIPE_CH_NO 3
/* Pipe config descriptor */
/* bits per component */
#define HDR10_BPC_POS 0
#define HDR10_BPC_MASK GENMASK(1, 0)
/* colorspace */
#define HDR10_CS_POS 2
#define HDR10_CS_MASK GENMASK(3, 2)
/* nonlinearity type */
#define HDR10_NL_POS 4
#define HDR10_NL_MASK GENMASK(8, 4)
/* pixel range */
#define HDR10_PR_POS 9
#define HDR10_PR_MASK GENMASK(10, 9)
/* gamut type */
#define HDR10_G_POS 11
#define HDR10_G_MASK GENMASK(15, 11)
/* FW Table Type Descriptor */
#define HDR10_TT_LUT BIT(0)
#define HDR10_TT_CSCA BIT(1)
#define HDR10_TT_CSCB BIT(2)
/* Pipe type */
#define HDR10_PT_OUTPUT BIT(3)
/* Output pipe config descriptor */
#define HDR10_IPIPE_DESC_POS 4
#define HDR10_IPIPE_DESC_MASK GENMASK(19, 4)
/* Input pipe config descriptor */
#define HDR10_OPIPE_DESC_POS 20
#define HDR10_OPIPE_DESC_MASK GENMASK(35, 20)
/* config invalid */
#define HDR10_DESC_INVALID BIT(63)
enum dcss_hdr10_csc {
HDR10_CSCA,
HDR10_CSCB,
};
struct dcss_hdr10_tbl_node {
struct list_head node;
u64 tbl_descriptor;
u32 *tbl_data;
};
struct dcss_hdr10_opipe_tbls {
struct list_head lut;
struct list_head csc;
};
struct dcss_hdr10_ipipe_tbls {
struct list_head lut;
struct list_head csca;
struct list_head cscb;
};
struct dcss_hdr10_ch {
struct dcss_hdr10 *hdr10;
void __iomem *base_reg;
u32 base_ofs;
u64 old_cfg_desc;
u32 id;
};
struct dcss_hdr10 {
struct device *dev;
struct dcss_ctxld *ctxld;
u32 ctx_id;
struct dcss_hdr10_ch ch[4]; /* the 4th channel is the output pipe (OPIPE) */
struct dcss_hdr10_ipipe_tbls *ipipe_tbls;
struct dcss_hdr10_opipe_tbls *opipe_tbls;
u8 *fw_data;
u32 fw_size;
};
static void dcss_hdr10_write(struct dcss_hdr10_ch *ch, u32 val, u32 ofs)
{
struct dcss_hdr10 *hdr10 = ch->hdr10;
dcss_ctxld_write(hdr10->ctxld, hdr10->ctx_id, val, ch->base_ofs + ofs);
}
static void dcss_hdr10_csc_fill(struct dcss_hdr10_ch *ch,
enum dcss_hdr10_csc csc_to_use,
u32 *map)
{
int i;
u32 csc_base_ofs[] = {
DCSS_HDR10_CSCA_BASE + DCSS_HDR10_CSC_CONTROL,
DCSS_HDR10_CSCB_BASE + DCSS_HDR10_CSC_CONTROL,
};
for (i = 0; i < HDR10_CSC_MAX_REGS; i++) {
u32 reg_ofs = csc_base_ofs[csc_to_use] + i * sizeof(u32);
dcss_hdr10_write(ch, map[i], reg_ofs);
}
}
static void dcss_hdr10_lut_fill(struct dcss_hdr10_ch *ch, u32 *map)
{
int i, comp;
u32 lut_base_ofs, ctrl_ofs, lut_entries;
if (ch->id == OPIPE_CH_NO) {
ctrl_ofs = DCSS_HDR10_LTNL;
lut_entries = HDR10_OPIPE_LUT_MAX_ENTRIES;
} else {
ctrl_ofs = DCSS_HDR10_LUT_CONTROL;
lut_entries = HDR10_IPIPE_LUT_MAX_ENTRIES;
}
if (ch->id != OPIPE_CH_NO)
dcss_hdr10_write(ch, *map++, ctrl_ofs);
for (comp = 0; comp < 3; comp++) {
lut_base_ofs = DCSS_HDR10_A0_LUT + comp * 0x1000;
if (ch->id == OPIPE_CH_NO) {
dcss_hdr10_write(ch, map[0], lut_base_ofs);
lut_base_ofs += 4;
}
for (i = 0; i < lut_entries; i++) {
u32 reg_ofs = lut_base_ofs + i * sizeof(u32);
dcss_hdr10_write(ch, map[i], reg_ofs);
}
}
map += lut_entries;
if (ch->id != OPIPE_CH_NO)
dcss_hdr10_write(ch, *map, DCSS_HDR10_FL2FX);
else
dcss_hdr10_write(ch, *map, ctrl_ofs);
}
static int dcss_hdr10_ch_init_all(struct dcss_hdr10 *hdr10,
unsigned long hdr10_base)
{
struct dcss_hdr10_ch *ch;
int i;
for (i = 0; i < 4; i++) {
ch = &hdr10->ch[i];
ch->base_ofs = hdr10_base + i * 0x4000;
ch->base_reg = devm_ioremap(hdr10->dev, ch->base_ofs, SZ_16K);
if (!ch->base_reg) {
dev_err(hdr10->dev, "hdr10: unable to remap ch base\n");
return -ENOMEM;
}
ch->old_cfg_desc = HDR10_DESC_INVALID;
ch->id = i;
ch->hdr10 = hdr10;
}
return 0;
}
static u32 *dcss_hdr10_find_tbl(u64 desc, struct list_head *head)
{
struct list_head *node;
struct dcss_hdr10_tbl_node *tbl_node;
list_for_each(node, head) {
tbl_node = container_of(node, struct dcss_hdr10_tbl_node, node);
if ((tbl_node->tbl_descriptor & desc) == desc)
return tbl_node->tbl_data;
}
return NULL;
}
static int dcss_hdr10_get_tbls(struct dcss_hdr10 *hdr10, bool input,
u64 desc, u32 **lut, u32 **csca, u32 **cscb)
{
struct list_head *lut_list, *csca_list, *cscb_list;
lut_list = input ? &hdr10->ipipe_tbls->lut : &hdr10->opipe_tbls->lut;
csca_list = input ? &hdr10->ipipe_tbls->csca : &hdr10->opipe_tbls->csc;
cscb_list = input ? &hdr10->ipipe_tbls->cscb : NULL;
*lut = dcss_hdr10_find_tbl(desc, lut_list);
*csca = dcss_hdr10_find_tbl(desc, csca_list);
*cscb = NULL;
if (cscb_list)
*cscb = dcss_hdr10_find_tbl(desc, cscb_list);
return 0;
}
static void dcss_hdr10_write_pipe_tbls(struct dcss_hdr10_ch *ch,
u32 *lut, u32 *csca, u32 *cscb)
{
if (csca)
dcss_hdr10_csc_fill(ch, HDR10_CSCA, csca);
if (ch->id != OPIPE_CH_NO && cscb)
dcss_hdr10_csc_fill(ch, HDR10_CSCB, cscb);
if (lut)
dcss_hdr10_lut_fill(ch, lut);
}
static int dcss_hdr10_tbl_add(struct dcss_hdr10 *hdr10, u64 desc, u32 sz,
u32 *data)
{
struct device *dev = hdr10->dev;
struct dcss_hdr10_tbl_node *node;
node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
/* we don't need to store the table type and pipe type */
node->tbl_descriptor = desc >> 4;
node->tbl_data = data;
if (!(desc & HDR10_PT_OUTPUT)) {
if (desc & HDR10_TT_LUT)
list_add(&node->node, &hdr10->ipipe_tbls->lut);
else if (desc & HDR10_TT_CSCA)
list_add(&node->node, &hdr10->ipipe_tbls->csca);
else if (desc & HDR10_TT_CSCB)
list_add(&node->node, &hdr10->ipipe_tbls->cscb);
return 0;
}
if (desc & HDR10_TT_LUT)
list_add(&node->node, &hdr10->opipe_tbls->lut);
else if (desc & HDR10_TT_CSCA)
list_add(&node->node, &hdr10->opipe_tbls->csc);
return 0;
}
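/*
 * The table blob is a flat sequence of records: a 64-bit descriptor, a 32-bit
 * size (in 32-bit words), then <size> words of table data.
 */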
static int dcss_hdr10_parse_fw_data(struct dcss_hdr10 *hdr10)
{
u32 *data = (u32 *)hdr10->fw_data;
u32 remaining = hdr10->fw_size / sizeof(u32);
u64 tbl_desc;
u32 tbl_size;
int ret;
while (remaining) {
tbl_desc = *((u64 *)data);
data += 2;
tbl_size = *data++;
ret = dcss_hdr10_tbl_add(hdr10, tbl_desc, tbl_size, data);
if (ret)
return ret;
data += tbl_size;
remaining -= tbl_size + 3;
}
return 0;
}
static void dcss_hdr10_cleanup_tbls(struct dcss_hdr10 *hdr10)
{
int i;
struct dcss_hdr10_tbl_node *tbl_node, *next;
struct list_head *tbls[] = {
&hdr10->ipipe_tbls->lut,
&hdr10->ipipe_tbls->csca,
&hdr10->ipipe_tbls->cscb,
&hdr10->opipe_tbls->lut,
&hdr10->opipe_tbls->csc,
};
for (i = 0; i < 5; i++) {
list_for_each_entry_safe(tbl_node, next, tbls[i], node) {
list_del(&tbl_node->node);
devm_kfree(hdr10->dev, tbl_node);
}
}
devm_kfree(hdr10->dev, hdr10->opipe_tbls);
devm_kfree(hdr10->dev, hdr10->ipipe_tbls);
}
#ifndef USE_TBL_HEADER
static void dcss_hdr10_fw_handler(const struct firmware *fw, void *context)
{
struct dcss_hdr10 *hdr10 = context;
int i;
if (!fw) {
dev_err(hdr10->dev, "hdr10: DCSS FW load failed.\n");
return;
}
/* we need to keep the tables for the entire life of the driver */
hdr10->fw_data = devm_kzalloc(hdr10->dev, fw->size, GFP_KERNEL);
if (!hdr10->fw_data)
return;
memcpy(hdr10->fw_data, fw->data, fw->size);
hdr10->fw_size = fw->size;
release_firmware(fw);
if (dcss_hdr10_parse_fw_data(hdr10)) {
dcss_hdr10_cleanup_tbls(hdr10);
return;
}
for (i = 0; i < 4; i++) {
u32 *lut, *csca, *cscb;
struct dcss_hdr10_ch *ch = &hdr10->ch[i];
bool is_input_pipe = (i != OPIPE_CH_NO);
if (ch->old_cfg_desc != HDR10_DESC_INVALID) {
dcss_hdr10_get_tbls(hdr10, is_input_pipe,
ch->old_cfg_desc, &lut,
&csca, &cscb);
dcss_hdr10_write_pipe_tbls(ch, lut, csca, cscb);
}
}
dev_info(hdr10->dev, "hdr10: DCSS FW loaded successfully\n");
}
#endif
static int dcss_hdr10_tbls_init(struct dcss_hdr10 *hdr10)
{
struct device *dev = hdr10->dev;
hdr10->ipipe_tbls = devm_kzalloc(dev, sizeof(*hdr10->ipipe_tbls),
GFP_KERNEL);
if (!hdr10->ipipe_tbls)
return -ENOMEM;
INIT_LIST_HEAD(&hdr10->ipipe_tbls->lut);
INIT_LIST_HEAD(&hdr10->ipipe_tbls->csca);
INIT_LIST_HEAD(&hdr10->ipipe_tbls->cscb);
hdr10->opipe_tbls = devm_kzalloc(dev, sizeof(*hdr10->opipe_tbls),
GFP_KERNEL);
if (!hdr10->opipe_tbls) {
devm_kfree(dev, hdr10->ipipe_tbls);
return -ENOMEM;
}
INIT_LIST_HEAD(&hdr10->opipe_tbls->lut);
INIT_LIST_HEAD(&hdr10->opipe_tbls->csc);
return 0;
}
int dcss_hdr10_init(struct dcss_dev *dcss, unsigned long hdr10_base)
{
int ret;
struct dcss_hdr10 *hdr10;
hdr10 = devm_kzalloc(dcss->dev, sizeof(*hdr10), GFP_KERNEL);
if (!hdr10)
return -ENOMEM;
dcss->hdr10 = hdr10;
hdr10->dev = dcss->dev;
hdr10->ctx_id = CTX_SB_HP;
hdr10->ctxld = dcss->ctxld;
ret = dcss_hdr10_tbls_init(hdr10);
if (ret < 0) {
dev_err(dcss->dev, "hdr10: Cannot init table lists.\n");
goto cleanup;
}
#ifndef USE_TBL_HEADER
ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, "dcss.fw",
dcss->dev, GFP_KERNEL, hdr10,
dcss_hdr10_fw_handler);
if (ret < 0) {
dev_err(dcss->dev, "hdr10: Cannot async load DCSS FW.\n");
goto cleanup_tbls;
}
#else
hdr10->fw_data = (u8 *)dcss_hdr10_tables;
hdr10->fw_size = sizeof(dcss_hdr10_tables);
ret = dcss_hdr10_parse_fw_data(hdr10);
if (ret)
goto cleanup_tbls;
#endif
ret = dcss_hdr10_ch_init_all(hdr10, hdr10_base);
if (ret) {
int i;
for (i = 0; i < 4; i++) {
if (hdr10->ch[i].base_reg)
devm_iounmap(hdr10->dev, hdr10->ch[i].base_reg);
}
goto cleanup_tbls;
}
return 0;
cleanup_tbls:
dcss_hdr10_cleanup_tbls(hdr10);
cleanup:
devm_kfree(hdr10->dev, hdr10);
return ret;
}
void dcss_hdr10_exit(struct dcss_hdr10 *hdr10)
{
int i;
for (i = 0; i < 4; i++) {
if (hdr10->ch[i].base_reg)
devm_iounmap(hdr10->dev, hdr10->ch[i].base_reg);
}
dcss_hdr10_cleanup_tbls(hdr10);
devm_kfree(hdr10->dev, hdr10);
}
static u32 dcss_hdr10_pipe_desc(struct dcss_hdr10_pipe_cfg *pipe_cfg)
{
u32 desc;
desc = 2 << HDR10_BPC_POS;
desc |= pipe_cfg->is_yuv ? 2 << HDR10_CS_POS : 1 << HDR10_CS_POS;
desc |= ((1 << pipe_cfg->nl) << HDR10_NL_POS) & HDR10_NL_MASK;
desc |= ((1 << pipe_cfg->pr) << HDR10_PR_POS) & HDR10_PR_MASK;
desc |= ((1 << pipe_cfg->g) << HDR10_G_POS) & HDR10_G_MASK;
return desc;
}
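/*
 * The lookup descriptor packs the input pipe config in bits 15:0 and the
 * output pipe config in bits 31:16; dcss_hdr10_find_tbl() picks the first
 * table whose descriptor has all of the requested bits set.
 */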
static u64 dcss_hdr10_get_desc(struct dcss_hdr10_pipe_cfg *ipipe_cfg,
struct dcss_hdr10_pipe_cfg *opipe_cfg)
{
u32 ipipe_desc, opipe_desc;
ipipe_desc = dcss_hdr10_pipe_desc(ipipe_cfg);
opipe_desc = dcss_hdr10_pipe_desc(opipe_cfg);
return (ipipe_desc & 0xFFFF) | ((opipe_desc & 0xFFFF) << 16);
}
static void dcss_hdr10_pipe_setup(struct dcss_hdr10_ch *ch, u64 desc)
{
bool pipe_cfg_chgd;
u32 *csca, *cscb, *lut;
pipe_cfg_chgd = ch->old_cfg_desc != desc;
if (!pipe_cfg_chgd)
return;
dcss_hdr10_get_tbls(ch->hdr10, ch->id != OPIPE_CH_NO,
desc, &lut, &csca, &cscb);
dcss_hdr10_write_pipe_tbls(ch, lut, csca, cscb);
ch->old_cfg_desc = desc;
}
void dcss_hdr10_setup(struct dcss_hdr10 *hdr10, int ch_num,
struct dcss_hdr10_pipe_cfg *ipipe_cfg,
struct dcss_hdr10_pipe_cfg *opipe_cfg)
{
u64 desc = dcss_hdr10_get_desc(ipipe_cfg, opipe_cfg);
dcss_hdr10_pipe_setup(&hdr10->ch[ch_num], desc);
/*
 * The input pipe configuration doesn't matter when configuring the output
 * pipe, so just mask off the input part of the descriptor.
 */
dcss_hdr10_pipe_setup(&hdr10->ch[OPIPE_CH_NO], desc & ~0xffff);
}

View File

@ -0,0 +1,429 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drmP.h>
#include <linux/component.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops);
struct dcss_drm_commit {
struct work_struct work;
struct drm_device *drm;
struct drm_atomic_state *state;
};
static void dcss_kms_setup_opipe_gamut(u32 colorspace,
const struct drm_display_mode *mode,
enum dcss_hdr10_gamut *g,
enum dcss_hdr10_nonlinearity *nl)
{
u8 vic;
switch (colorspace) {
case DRM_MODE_COLORIMETRY_BT709_YCC:
case DRM_MODE_COLORIMETRY_XVYCC_709:
*g = G_REC709;
*nl = NL_REC709;
return;
case DRM_MODE_COLORIMETRY_SMPTE_170M_YCC:
case DRM_MODE_COLORIMETRY_XVYCC_601:
case DRM_MODE_COLORIMETRY_SYCC_601:
case DRM_MODE_COLORIMETRY_OPYCC_601:
*g = G_REC601_NTSC;
*nl = NL_REC709;
return;
case DRM_MODE_COLORIMETRY_BT2020_CYCC:
case DRM_MODE_COLORIMETRY_BT2020_RGB:
case DRM_MODE_COLORIMETRY_BT2020_YCC:
*g = G_REC2020;
*nl = NL_REC2084;
return;
case DRM_MODE_COLORIMETRY_OPRGB:
*g = G_ADOBE_ARGB;
*nl = NL_SRGB;
return;
default:
break;
}
/*
 * If we reached this point, the default colorimetry is used: non-CEA modes
 * fall back to sRGB, SD CEA modes use Rec.601, everything else uses Rec.709.
 */
vic = drm_match_cea_mode(mode);
if (vic == 0) {
*g = G_ADOBE_ARGB;
*nl = NL_SRGB;
return;
}
if (mode->vdisplay == 480 || mode->vdisplay == 576 ||
mode->vdisplay == 240 || mode->vdisplay == 288) {
*g = G_REC601_NTSC;
*nl = NL_REC709;
return;
}
/* 2160p, 1080p, 720p */
*g = G_REC709;
*nl = NL_REC709;
}
#define YUV_MODE BIT(0)
void dcss_kms_setup_opipe(struct drm_connector_state *conn_state)
{
struct drm_crtc *crtc = conn_state->crtc;
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
int mode_flags = crtc->state->adjusted_mode.private_flags;
enum hdmi_quantization_range qr;
qr = drm_default_rgb_quant_range(&crtc->state->adjusted_mode);
dcss_kms_setup_opipe_gamut(conn_state->colorspace,
&crtc->state->adjusted_mode,
&dcss_crtc->opipe_g,
&dcss_crtc->opipe_nl);
dcss_crtc->opipe_pr = qr == HDMI_QUANTIZATION_RANGE_FULL ? PR_FULL :
PR_LIMITED;
dcss_crtc->output_is_yuv = !!(mode_flags & YUV_MODE);
}
static void dcss_kms_setup_output_pipe(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_display_info *di;
int i;
for_each_new_connector_in_state(state, connector, conn_state, i) {
if (!conn_state->best_encoder)
continue;
if (!conn_state->crtc->state->active ||
!drm_atomic_crtc_needs_modeset(conn_state->crtc->state))
continue;
crtc = connector->state->crtc;
di = &connector->display_info;
dcss_kms_setup_opipe(conn_state);
}
}
static void dcss_drm_atomic_commit_tail(struct dcss_drm_commit *commit)
{
struct drm_atomic_state *state = commit->state;
struct drm_device *drm = commit->drm;
struct dcss_kms_dev *kms = container_of(drm, struct dcss_kms_dev, base);
drm_atomic_helper_wait_for_fences(drm, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_atomic_helper_commit_modeset_disables(drm, state);
dcss_kms_setup_output_pipe(state);
drm_atomic_helper_commit_modeset_enables(drm, state);
drm_atomic_helper_commit_planes(drm, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_put(state);
spin_lock(&kms->commit.wait.lock);
kms->commit.pending = false;
wake_up_all_locked(&kms->commit.wait);
spin_unlock(&kms->commit.wait.lock);
kfree(commit);
}
static void dcss_commit_work(struct work_struct *work)
{
struct dcss_drm_commit *commit = container_of(work,
struct dcss_drm_commit,
work);
dcss_drm_atomic_commit_tail(commit);
}
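/*
 * Custom atomic commit: async (plane-only) updates take the helper fast path;
 * everything else is serialized to a single pending commit via
 * kms->commit.wait and then run either inline or, for non-blocking commits,
 * on the ordered commit workqueue.
 */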
static int dcss_drm_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state,
bool nonblock)
{
int ret;
struct dcss_kms_dev *kms = container_of(drm, struct dcss_kms_dev, base);
struct dcss_drm_commit *commit;
if (state->async_update) {
ret = drm_atomic_helper_prepare_planes(drm, state);
if (ret)
return ret;
drm_atomic_helper_async_commit(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
return 0;
}
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
if (!commit)
return -ENOMEM;
commit->drm = drm;
commit->state = state;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
goto err_free;
INIT_WORK(&commit->work, dcss_commit_work);
ret = drm_atomic_helper_prepare_planes(drm, state);
if (ret)
goto err_free;
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(drm, state, true);
if (ret)
goto err;
}
spin_lock(&kms->commit.wait.lock);
ret = wait_event_interruptible_locked(kms->commit.wait,
!kms->commit.pending);
if (ret == 0)
kms->commit.pending = true;
spin_unlock(&kms->commit.wait.lock);
if (ret)
goto err;
ret = drm_atomic_helper_swap_state(state, true);
if (ret)
goto err;
drm_atomic_state_get(state);
if (nonblock)
queue_work(kms->commit_wq, &commit->work);
else
dcss_drm_atomic_commit_tail(commit);
return 0;
err:
drm_atomic_helper_cleanup_planes(drm, state);
err_free:
kfree(commit);
return ret;
}
const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = dcss_drm_atomic_commit,
};
static struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
.date = "20190917",
.major = 1,
.minor = 0,
.patchlevel = 0,
};
static const struct drm_mode_config_helper_funcs dcss_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms)
{
struct drm_mode_config *config = &kms->base.mode_config;
drm_mode_config_init(&kms->base);
config->min_width = 1;
config->min_height = 1;
config->max_width = 4096;
config->max_height = 4096;
config->allow_fb_modifiers = true;
config->normalize_zpos = true;
config->funcs = &dcss_drm_mode_config_funcs;
config->helper_private = &dcss_mode_config_helpers;
}
static const struct drm_encoder_funcs dcss_kms_simple_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int dcss_kms_setup_encoder(struct dcss_kms_dev *kms)
{
struct drm_device *ddev = &kms->base;
struct drm_encoder *encoder = &kms->encoder;
struct drm_crtc *crtc = (struct drm_crtc *)&kms->crtc;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0,
&panel, &bridge);
if (ret)
return ret;
if (!bridge) {
dev_err(ddev->dev, "No bridge found %d.\n", ret);
return -ENODEV;
}
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_encoder_init(&kms->base, encoder,
&dcss_kms_simple_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret) {
dev_err(ddev->dev, "Failed initializing encoder %d.\n", ret);
return ret;
}
return drm_bridge_attach(encoder, bridge, NULL);
}
struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss, bool componentized)
{
struct dcss_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
struct drm_device *drm;
struct dcss_crtc *crtc;
int ret;
if (!kms)
return ERR_PTR(-ENOMEM);
drm = &kms->base;
crtc = &kms->crtc;
ret = drm_dev_init(drm, &dcss_kms_driver, dcss->dev);
if (ret)
goto free_kms;
drm->dev_private = dcss;
dcss_kms_mode_config_init(kms);
ret = drm_vblank_init(drm, 1);
if (ret)
goto cleanup_mode_config;
drm->irq_enabled = true;
ret = dcss_crtc_init(crtc, drm);
if (ret)
goto cleanup_mode_config;
kms->commit_wq = alloc_ordered_workqueue("dcss_nonblock_commit_wq", 0);
if (!kms->commit_wq) {
ret = -ENOMEM;
goto cleanup_crtc;
}
init_waitqueue_head(&kms->commit.wait);
if (componentized)
ret = component_bind_all(dcss->dev, kms);
else
ret = dcss_kms_setup_encoder(kms);
if (ret)
goto cleanup_wq;
drm_mode_config_reset(drm);
dcss_crtc_attach_color_mgmt_properties(crtc);
drm_kms_helper_poll_init(drm);
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_wq;
drm_fbdev_generic_setup(drm, 32);
return kms;
cleanup_wq:
drm_kms_helper_poll_fini(drm);
destroy_workqueue(kms->commit_wq);
cleanup_crtc:
dcss_crtc_deinit(crtc, drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
free_kms:
kfree(kms);
return ERR_PTR(ret);
}
void dcss_kms_detach(struct dcss_kms_dev *kms, bool componentized)
{
struct drm_device *drm = &kms->base;
struct dcss_dev *dcss = drm->dev_private;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_crtc_vblank_off(&kms->crtc.base);
drm->irq_enabled = false;
drm_mode_config_cleanup(drm);
destroy_workqueue(kms->commit_wq);
dcss_crtc_deinit(&kms->crtc, drm);
if (componentized)
component_unbind_all(dcss->dev, drm);
drm->dev_private = NULL;
drm_dev_put(drm);
}

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 NXP.
*/
#ifndef _DCSS_KMS_H_
#define _DCSS_KMS_H_
#include <drm/drm_encoder.h>
struct dcss_plane {
struct drm_plane base;
uint64_t dtrc_table_ofs_val;
struct drm_property *dtrc_table_ofs_prop;
int ch_num;
enum drm_plane_type type;
bool use_dtrc;
};
struct dcss_crtc {
struct drm_crtc base;
struct drm_crtc_state *state;
struct dcss_plane *plane[3];
int irq;
bool irq_enabled;
struct completion en_completion;
struct completion dis_completion;
bool output_is_yuv;
enum dcss_hdr10_nonlinearity opipe_nl;
enum dcss_hdr10_gamut opipe_g;
enum dcss_hdr10_pixel_range opipe_pr;
};
struct commit {
wait_queue_head_t wait;
bool pending;
};
struct dcss_kms_dev {
struct drm_device base;
struct dcss_crtc crtc;
struct drm_encoder encoder;
struct workqueue_struct *commit_wq;
struct commit commit;
};
struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss, bool componentized);
void dcss_kms_detach(struct dcss_kms_dev *kms, bool componentized);
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm);
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm);
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
unsigned int possible_crtcs,
enum drm_plane_type type,
unsigned int zpos);
void dcss_crtc_attach_color_mgmt_properties(struct dcss_crtc *crtc);
#endif

View File

@ -0,0 +1,673 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <linux/dma-buf.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
static const u32 dcss_graphics_formats[] = {
/* RGB */
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_RGBX1010102,
DRM_FORMAT_BGRX1010102,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_RGBA1010102,
DRM_FORMAT_BGRA1010102,
};
static const u32 dcss_video_formats[] = {
/* YUV444 */
DRM_FORMAT_AYUV,
/* YUV422 */
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
/* YUV420 */
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV12_10LE40,
};
static const u64 dcss_video_format_modifiers[] = {
DRM_FORMAT_MOD_VSI_G1_TILED,
DRM_FORMAT_MOD_VSI_G2_TILED,
DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static const u64 dcss_graphics_format_modifiers[] = {
DRM_FORMAT_MOD_VIVANTE_TILED,
DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
DRM_FORMAT_MOD_VIVANTE_SUPER_TILED_FC,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static inline struct dcss_plane *to_dcss_plane(struct drm_plane *p)
{
return container_of(p, struct dcss_plane, base);
}
static inline bool dcss_plane_fb_is_linear(const struct drm_framebuffer *fb)
{
return ((fb->flags & DRM_MODE_FB_MODIFIERS) == 0) ||
((fb->flags & DRM_MODE_FB_MODIFIERS) != 0 &&
fb->modifier == DRM_FORMAT_MOD_LINEAR);
}
static void dcss_plane_destroy(struct drm_plane *plane)
{
struct dcss_plane *dcss_plane = container_of(plane, struct dcss_plane,
base);
drm_plane_cleanup(plane);
kfree(dcss_plane);
}
static int dcss_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
if (property == dcss_plane->dtrc_table_ofs_prop)
dcss_plane->dtrc_table_ofs_val = val;
else
return -EINVAL;
return 0;
}
static int dcss_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
if (property == dcss_plane->dtrc_table_ofs_prop)
*val = dcss_plane->dtrc_table_ofs_val;
else
return -EINVAL;
return 0;
}
static bool dcss_plane_format_mod_supported(struct drm_plane *plane,
u32 format,
u64 modifier)
{
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
switch (format) {
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB2101010:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED ||
modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED_FC;
default:
return modifier == DRM_FORMAT_MOD_LINEAR;
}
break;
case DRM_PLANE_TYPE_OVERLAY:
switch (format) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV12_10LE40:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == DRM_FORMAT_MOD_VSI_G1_TILED ||
modifier == DRM_FORMAT_MOD_VSI_G2_TILED ||
modifier == DRM_FORMAT_MOD_VSI_G2_TILED_COMPRESSED;
default:
return modifier == DRM_FORMAT_MOD_LINEAR;
}
break;
default:
return false;
}
return false;
}
static const struct drm_plane_funcs dcss_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = dcss_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = dcss_plane_atomic_set_property,
.atomic_get_property = dcss_plane_atomic_get_property,
.format_mod_supported = dcss_plane_format_mod_supported,
};
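/*
 * Rotation capabilities depend on both pixel format and tiling: linear RGB and
 * linear NV12/NV21 support 180-degree rotation plus reflection, Vivante-tiled
 * RGB supports all rotations and reflections, and linear NV12_10LE40 is
 * limited to vertical reflection only.
 */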
static bool dcss_plane_can_rotate(const struct drm_format_info *format,
bool mod_present, u64 modifier,
unsigned int rotation)
{
bool linear_format = !mod_present ||
(mod_present && modifier == DRM_FORMAT_MOD_LINEAR);
u32 supported_rotation = DRM_MODE_ROTATE_0;
if (!format->is_yuv && linear_format)
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
else if (!format->is_yuv &&
modifier == DRM_FORMAT_MOD_VIVANTE_TILED)
supported_rotation = DRM_MODE_ROTATE_MASK |
DRM_MODE_REFLECT_MASK;
else if (format->is_yuv && linear_format &&
(format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21))
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
else if (format->is_yuv && linear_format &&
format->format == DRM_FORMAT_NV12_10LE40)
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y;
return !!(rotation & supported_rotation);
}
static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt)
{
if (src_w < 64 &&
(pix_fmt == DRM_FORMAT_NV12 || pix_fmt == DRM_FORMAT_NV21 ||
pix_fmt == DRM_FORMAT_NV12_10LE40))
return false;
else if (src_w < 32 &&
(pix_fmt == DRM_FORMAT_UYVY || pix_fmt == DRM_FORMAT_VYUY ||
pix_fmt == DRM_FORMAT_YUYV || pix_fmt == DRM_FORMAT_YVYU))
return false;
return src_w >= 16 && src_h >= 8;
}
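/*
* Summary of the limits enforced by dcss_plane_is_source_size_allowed()
* above: two-plane YUV 4:2:0 sources (NV12/NV21/NV12_10LE40) need
* src_w >= 64, packed 4:2:2 sources (UYVY/VYUY/YUYV/YVYU) need
* src_w >= 32, everything else needs src_w >= 16, and all formats need
* src_h >= 8.
*/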
static inline bool dcss_plane_use_dtrc(struct drm_framebuffer *fb,
enum drm_plane_type type)
{
u64 pix_format = fb->format->format;
return !dcss_plane_fb_is_linear(fb) &&
type == DRM_PLANE_TYPE_OVERLAY &&
(pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21 ||
pix_format == DRM_FORMAT_NV12_10LE40);
}
static int dcss_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
struct drm_gem_cma_object *cma_obj;
struct drm_crtc_state *crtc_state;
int hdisplay, vdisplay;
int min, max;
int ret;
if (!fb || !state->crtc)
return 0;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
WARN_ON(!cma_obj);
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
hdisplay = crtc_state->adjusted_mode.hdisplay;
vdisplay = crtc_state->adjusted_mode.vdisplay;
if (!dcss_plane_is_source_size_allowed(state->src_w >> 16,
state->src_h >> 16,
fb->format->format)) {
DRM_DEBUG_KMS("Source plane size is not allowed!\n");
return -EINVAL;
}
dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num,
&min, &max);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
min, max, !is_primary_plane,
false);
if (ret)
return ret;
if (!state->visible)
return 0;
if (!dcss_plane_can_rotate(fb->format,
!!(fb->flags & DRM_MODE_FB_MODIFIERS),
fb->modifier,
state->rotation)) {
DRM_DEBUG_KMS("requested rotation is not allowed!\n");
return -EINVAL;
}
if ((fb->flags & DRM_MODE_FB_MODIFIERS) &&
!plane->funcs->format_mod_supported(plane,
fb->format->format,
fb->modifier)) {
DRM_DEBUG_KMS("Invalid modifier: %llx\n", fb->modifier);
return -EINVAL;
}
dcss_plane->use_dtrc = dcss_plane_use_dtrc(fb, plane->type);
return 0;
}
static struct drm_gem_object *dcss_plane_gem_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct drm_gem_object *obj;
if (IS_ERR(dma_buf))
return ERR_CAST(dma_buf);
mutex_lock(&dev->object_name_lock);
obj = dev->driver->gem_prime_import(dev, dma_buf);
mutex_unlock(&dev->object_name_lock);
return obj;
}
static void dcss_plane_set_primary_base(struct dcss_plane *dcss_plane,
u32 baddr)
{
struct drm_plane *plane = &dcss_plane->base;
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct dma_buf *dma_buf = cma_obj->base.dma_buf;
struct drm_gem_object *gem_obj;
dma_addr_t caddr;
bool compressed = true;
u32 compressed_format = _VIV_CFMT_ARGB8;
_VIV_VIDMEM_METADATA *mdata;
if (dcss_plane_fb_is_linear(fb) ||
((fb->flags & DRM_MODE_FB_MODIFIERS) &&
(fb->modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
fb->modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED))) {
dcss_dec400d_bypass(dcss->dec400d);
return;
}
if (!dma_buf) {
caddr = cma_obj->paddr + ALIGN(fb->height, 64) * fb->pitches[0];
} else {
mdata = dma_buf->priv;
if (!mdata || mdata->magic != VIV_VIDMEM_METADATA_MAGIC)
return;
gem_obj = dcss_plane_gem_import(plane->dev, mdata->ts_dma_buf);
if (IS_ERR(gem_obj))
return;
caddr = to_drm_gem_cma_obj(gem_obj)->paddr;
/* release gem_obj */
drm_gem_object_put_unlocked(gem_obj);
dcss_dec400d_fast_clear_config(dcss->dec400d, mdata->fc_value,
mdata->fc_enabled);
compressed = !!mdata->compressed;
compressed_format = mdata->compress_format;
}
dcss_dec400d_read_config(dcss->dec400d, 0, compressed,
compressed_format);
dcss_dec400d_addr_set(dcss->dec400d, baddr, caddr);
}
static void dcss_plane_set_dtrc_base(struct dcss_plane *dcss_plane,
u32 p1_ba, u32 p2_ba)
{
struct drm_plane *plane = &dcss_plane->base;
struct dcss_dev *dcss = plane->dev->dev_private;
if (!dcss_plane->use_dtrc) {
dcss_dtrc_bypass(dcss->dtrc, dcss_plane->ch_num);
return;
}
dcss_dtrc_addr_set(dcss->dtrc, dcss_plane->ch_num,
p1_ba, p2_ba, dcss_plane->dtrc_table_ofs_val);
}
static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
{
struct drm_plane *plane = &dcss_plane->base;
struct drm_plane_state *state = plane->state;
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
unsigned long p1_ba = 0, p2_ba = 0;
u16 x1, y1;
x1 = state->src.x1 >> 16;
y1 = state->src.y1 >> 16;
if (!format->is_yuv ||
format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
p1_ba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * y1 +
format->char_per_block[0] * x1;
else if (format->format == DRM_FORMAT_NV12_10LE40)
p1_ba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * y1 +
format->char_per_block[0] * (x1 >> 2);
else if (format->format == DRM_FORMAT_UYVY ||
format->format == DRM_FORMAT_VYUY ||
format->format == DRM_FORMAT_YUYV ||
format->format == DRM_FORMAT_YVYU)
p1_ba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * y1 +
2 * format->char_per_block[0] * (x1 >> 1);
if (format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
p2_ba = cma_obj->paddr + fb->offsets[1] +
(((fb->pitches[1] >> 1) * (y1 >> 1) +
(x1 >> 1)) << 1);
else if (format->format == DRM_FORMAT_NV12_10LE40)
p2_ba = cma_obj->paddr + fb->offsets[1] +
(((fb->pitches[1] >> 1) * (y1 >> 1)) << 1) +
format->char_per_block[1] * (x1 >> 2);
dcss_dpr_addr_set(dcss->dpr, dcss_plane->ch_num, p1_ba, p2_ba,
fb->pitches[0]);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
dcss_plane_set_primary_base(dcss_plane, p1_ba);
else
dcss_plane_set_dtrc_base(dcss_plane,
cma_obj->paddr + fb->offsets[0],
cma_obj->paddr + fb->offsets[1]);
}
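/*
* Worked example for dcss_plane_atomic_set_base() above, with
* illustrative numbers rather than values from this driver: an NV12
* framebuffer with pitches[0] = pitches[1] = 1920 and a source crop at
* x1 = 100, y1 = 50 yields p1_ba = paddr + offsets[0] + 1920 * 50 + 100
* and p2_ba = paddr + offsets[1] + 1920 * (50 / 2) + 100, i.e. the
* chroma plane advances one row per two luma rows and x is rounded down
* to an even sample.
*/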
static bool dcss_plane_needs_setup(struct drm_plane_state *state,
struct drm_plane_state *old_state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
return state->crtc_x != old_state->crtc_x ||
state->crtc_y != old_state->crtc_y ||
state->crtc_w != old_state->crtc_w ||
state->crtc_h != old_state->crtc_h ||
state->src_x != old_state->src_x ||
state->src_y != old_state->src_y ||
state->src_w != old_state->src_w ||
state->src_h != old_state->src_h ||
fb->format->format != old_fb->format->format ||
fb->modifier != old_fb->modifier ||
state->rotation != old_state->rotation;
}
static void dcss_plane_setup_hdr10_pipes(struct drm_plane *plane)
{
struct dcss_dev *dcss = plane->dev->dev_private;
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct drm_plane_state *state = plane->state;
struct drm_crtc *crtc = state->crtc;
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct drm_framebuffer *fb = state->fb;
struct dcss_hdr10_pipe_cfg ipipe_cfg, opipe_cfg;
opipe_cfg.is_yuv = dcss_crtc->output_is_yuv;
opipe_cfg.g = dcss_crtc->opipe_g;
opipe_cfg.nl = dcss_crtc->opipe_nl;
opipe_cfg.pr = dcss_crtc->opipe_pr;
ipipe_cfg.is_yuv = fb->format->is_yuv;
if (!fb->format->is_yuv) {
ipipe_cfg.nl = NL_SRGB;
ipipe_cfg.pr = PR_FULL;
ipipe_cfg.g = G_ADOBE_ARGB;
goto setup;
}
switch (state->color_encoding) {
case DRM_COLOR_YCBCR_BT709:
ipipe_cfg.nl = NL_REC709;
ipipe_cfg.g = G_REC709;
break;
case DRM_COLOR_YCBCR_BT2020:
ipipe_cfg.nl = NL_REC2084;
ipipe_cfg.g = G_REC2020;
break;
default:
ipipe_cfg.nl = NL_REC709;
ipipe_cfg.g = G_REC601_PAL;
break;
}
ipipe_cfg.pr = state->color_range;
setup:
dcss_hdr10_setup(dcss->hdr10, dcss_plane->ch_num,
&ipipe_cfg, &opipe_cfg);
}
static void dcss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = plane->state;
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
u32 pixel_format;
struct drm_crtc_state *crtc_state;
bool modifiers_present;
u32 src_w, src_h, dst_w, dst_h;
struct drm_rect src, dst;
bool enable = true;
if (!fb || !state->crtc || !state->visible)
return;
pixel_format = fb->format->format;
crtc_state = state->crtc->state;
modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) &&
!dcss_plane_needs_setup(state, old_state) &&
!dcss_dtg_global_alpha_changed(dcss->dtg, dcss_plane->ch_num,
state->alpha >> 8)) {
dcss_plane_atomic_set_base(dcss_plane);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
dcss_dec400d_shadow_trig(dcss->dec400d);
return;
}
src = plane->state->src;
dst = plane->state->dst;
/*
* The width and height after clipping.
*/
src_w = drm_rect_width(&src) >> 16;
src_h = drm_rect_height(&src) >> 16;
dst_w = drm_rect_width(&dst);
dst_h = drm_rect_height(&dst);
dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, state->fb->format,
modifiers_present ? fb->modifier :
DRM_FORMAT_MOD_LINEAR);
if (dcss_plane->use_dtrc) {
u32 dtrc_w, dtrc_h;
dcss_dtrc_set_res(dcss->dtrc, dcss_plane->ch_num, state,
&dtrc_w, &dtrc_h);
dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, dtrc_w, dtrc_h);
} else {
dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h);
}
dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num,
state->rotation);
dcss_plane_atomic_set_base(dcss_plane);
dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
state->fb->format, src_w, src_h,
dst_w, dst_h,
drm_mode_vrefresh(&crtc_state->mode));
dcss_plane_setup_hdr10_pipes(plane);
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
dst.x1, dst.y1, dst_w, dst_h);
dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num,
fb->format, state->alpha >> 8);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
dcss_dec400d_enable(dcss->dec400d);
else if (dcss_plane->use_dtrc)
dcss_dtrc_enable(dcss->dtrc, dcss_plane->ch_num, true);
if (!dcss_plane->ch_num && (state->alpha >> 8) == 0)
enable = false;
dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable);
dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, enable);
if (!enable)
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
0, 0, 0, 0);
dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, enable);
}
static void dcss_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
if (dcss_plane->use_dtrc)
dcss_dtrc_enable(dcss->dtrc, dcss_plane->ch_num, false);
dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, false);
dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, false);
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, 0, 0, 0, 0);
dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, false);
}
static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = dcss_plane_atomic_check,
.atomic_update = dcss_plane_atomic_update,
.atomic_disable = dcss_plane_atomic_disable,
};
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
unsigned int possible_crtcs,
enum drm_plane_type type,
unsigned int zpos)
{
struct dcss_plane *dcss_plane;
const u64 *format_modifiers = dcss_video_format_modifiers;
const u32 *formats = dcss_video_formats;
u32 formats_size = ARRAY_SIZE(dcss_video_formats);
struct drm_property *prop;
int ret;
if (zpos > 2)
return ERR_PTR(-EINVAL);
dcss_plane = kzalloc(sizeof(*dcss_plane), GFP_KERNEL);
if (!dcss_plane) {
DRM_ERROR("failed to allocate plane\n");
return ERR_PTR(-ENOMEM);
}
if (type == DRM_PLANE_TYPE_PRIMARY) {
formats = dcss_graphics_formats;
formats_size = ARRAY_SIZE(dcss_graphics_formats);
format_modifiers = dcss_graphics_format_modifiers;
}
ret = drm_universal_plane_init(drm, &dcss_plane->base, possible_crtcs,
&dcss_plane_funcs, formats,
formats_size,
format_modifiers, type, NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
kfree(dcss_plane);
return ERR_PTR(ret);
}
drm_plane_helper_add(&dcss_plane->base, &dcss_plane_helper_funcs);
ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos);
if (ret)
return ERR_PTR(ret);
drm_plane_create_rotation_property(&dcss_plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 |
DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
dcss_plane->ch_num = 2 - zpos;
dcss_plane->type = type;
if (type == DRM_PLANE_TYPE_PRIMARY)
return dcss_plane;
prop = drm_property_create_range(drm, 0, "dtrc_table_ofs",
0, ULLONG_MAX);
if (!prop) {
DRM_ERROR("cannot create dtrc_table_ofs property\n");
return ERR_PTR(-ENOMEM);
}
dcss_plane->dtrc_table_ofs_prop = prop;
drm_object_attach_property(&dcss_plane->base.base, prop, 0);
return dcss_plane;
}

View File

@ -0,0 +1,119 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include "dcss-dev.h"
#define DCSS_RDSRC_CTRL_STATUS 0x00
#define RDSRC_RD_ERR BIT(31)
#define RDSRC_FRAME_COMP BIT(30)
#define RDSRC_FIFO_SIZE_POS 16
#define RDSRC_FIFO_SIZE_MASK GENMASK(22, 16)
#define RDSRC_RD_ERR_EN BIT(15)
#define RDSRC_FRAME_COMP_EN BIT(14)
#define RDSRC_P_SIZE_POS 7
#define RDSRC_P_SIZE_MASK GENMASK(9, 7)
#define RDSRC_T_SIZE_POS 5
#define RDSRC_T_SIZE_MASK GENMASK(6, 5)
#define RDSRC_BPP_POS 2
#define RDSRC_BPP_MASK GENMASK(4, 2)
#define RDSRC_ENABLE BIT(0)
#define DCSS_RDSRC_BASE_ADDR 0x10
#define DCSS_RDSRC_PITCH 0x14
#define DCSS_RDSRC_WIDTH 0x18
#define DCSS_RDSRC_HEIGHT 0x1C
struct dcss_rdsrc {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
u32 buf_addr;
u32 ctrl_status;
};
static void dcss_rdsrc_write(struct dcss_rdsrc *rdsrc, u32 val, u32 ofs)
{
dcss_ctxld_write(rdsrc->ctxld, rdsrc->ctx_id, val,
rdsrc->base_ofs + ofs);
}
int dcss_rdsrc_init(struct dcss_dev *dcss, unsigned long rdsrc_base)
{
struct dcss_rdsrc *rdsrc;
rdsrc = devm_kzalloc(dcss->dev, sizeof(*rdsrc), GFP_KERNEL);
if (!rdsrc)
return -ENOMEM;
rdsrc->base_reg = devm_ioremap(dcss->dev, rdsrc_base, SZ_4K);
if (!rdsrc->base_reg) {
dev_err(dcss->dev, "rdsrc: unable to remap base\n");
devm_kfree(dcss->dev, rdsrc);
return -ENOMEM;
}
dcss->rdsrc = rdsrc;
rdsrc->dev = dcss->dev;
rdsrc->base_ofs = rdsrc_base;
rdsrc->ctxld = dcss->ctxld;
rdsrc->ctx_id = CTX_SB_HP;
return 0;
}
void dcss_rdsrc_exit(struct dcss_rdsrc *rdsrc)
{
devm_iounmap(rdsrc->dev, rdsrc->base_reg);
devm_kfree(rdsrc->dev, rdsrc);
}
void dcss_rdsrc_setup(struct dcss_rdsrc *rdsrc, u32 pix_format, u32 dst_xres,
u32 dst_yres, u32 base_addr)
{
u32 buf_size, pitch, bpp;
/* since the scaler output is YUV444, the RDSRC output has to match */
bpp = 4;
rdsrc->ctrl_status = FIFO_512 << RDSRC_FIFO_SIZE_POS;
rdsrc->ctrl_status |= PSIZE_256 << RDSRC_P_SIZE_POS;
rdsrc->ctrl_status |= TSIZE_256 << RDSRC_T_SIZE_POS;
rdsrc->ctrl_status |= BPP_32_10BIT_OUTPUT << RDSRC_BPP_POS;
buf_size = dst_xres * dst_yres * bpp;
pitch = dst_xres * bpp;
rdsrc->buf_addr = base_addr;
dcss_rdsrc_write(rdsrc, rdsrc->buf_addr, DCSS_RDSRC_BASE_ADDR);
dcss_rdsrc_write(rdsrc, pitch, DCSS_RDSRC_PITCH);
dcss_rdsrc_write(rdsrc, dst_xres, DCSS_RDSRC_WIDTH);
dcss_rdsrc_write(rdsrc, dst_yres, DCSS_RDSRC_HEIGHT);
}
void dcss_rdsrc_enable(struct dcss_rdsrc *rdsrc)
{
dcss_rdsrc_write(rdsrc, rdsrc->ctrl_status, DCSS_RDSRC_CTRL_STATUS);
}
void dcss_rdsrc_disable(struct dcss_rdsrc *rdsrc)
{
/* RDSRC is turned off by setting the width and height to 0 */
dcss_rdsrc_write(rdsrc, 0, DCSS_RDSRC_WIDTH);
dcss_rdsrc_write(rdsrc, 0, DCSS_RDSRC_HEIGHT);
dcss_rdsrc_write(rdsrc, rdsrc->ctrl_status, DCSS_RDSRC_CTRL_STATUS);
}

View File

@ -0,0 +1,911 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*
* Scaling algorithms were contributed by Dzung Hoang <dzung.hoang@nxp.com>
*/
#include <linux/device.h>
#include "dcss-dev.h"
#define DCSS_SCALER_CTRL 0x00
#define SCALER_EN BIT(0)
#define REPEAT_EN BIT(4)
#define SCALE2MEM_EN BIT(8)
#define MEM2OFIFO_EN BIT(12)
#define DCSS_SCALER_OFIFO_CTRL 0x04
#define OFIFO_LOW_THRES_POS 0
#define OFIFO_LOW_THRES_MASK GENMASK(9, 0)
#define OFIFO_HIGH_THRES_POS 16
#define OFIFO_HIGH_THRES_MASK GENMASK(25, 16)
#define UNDERRUN_DETECT_CLR BIT(26)
#define LOW_THRES_DETECT_CLR BIT(27)
#define HIGH_THRES_DETECT_CLR BIT(28)
#define UNDERRUN_DETECT_EN BIT(29)
#define LOW_THRES_DETECT_EN BIT(30)
#define HIGH_THRES_DETECT_EN BIT(31)
#define DCSS_SCALER_SDATA_CTRL 0x08
#define YUV_EN BIT(0)
#define RTRAM_8LINES BIT(1)
#define Y_UV_BYTE_SWAP BIT(4)
#define A2R10G10B10_FORMAT_POS 8
#define A2R10G10B10_FORMAT_MASK GENMASK(11, 8)
#define DCSS_SCALER_BIT_DEPTH 0x0C
#define LUM_BIT_DEPTH_POS 0
#define LUM_BIT_DEPTH_MASK GENMASK(1, 0)
#define CHR_BIT_DEPTH_POS 4
#define CHR_BIT_DEPTH_MASK GENMASK(5, 4)
#define DCSS_SCALER_SRC_FORMAT 0x10
#define DCSS_SCALER_DST_FORMAT 0x14
#define FORMAT_MASK GENMASK(1, 0)
#define DCSS_SCALER_SRC_LUM_RES 0x18
#define DCSS_SCALER_SRC_CHR_RES 0x1C
#define DCSS_SCALER_DST_LUM_RES 0x20
#define DCSS_SCALER_DST_CHR_RES 0x24
#define WIDTH_POS 0
#define WIDTH_MASK GENMASK(11, 0)
#define HEIGHT_POS 16
#define HEIGHT_MASK GENMASK(27, 16)
#define DCSS_SCALER_V_LUM_START 0x48
#define V_START_MASK GENMASK(15, 0)
#define DCSS_SCALER_V_LUM_INC 0x4C
#define V_INC_MASK GENMASK(15, 0)
#define DCSS_SCALER_H_LUM_START 0x50
#define H_START_MASK GENMASK(18, 0)
#define DCSS_SCALER_H_LUM_INC 0x54
#define H_INC_MASK GENMASK(15, 0)
#define DCSS_SCALER_V_CHR_START 0x58
#define DCSS_SCALER_V_CHR_INC 0x5C
#define DCSS_SCALER_H_CHR_START 0x60
#define DCSS_SCALER_H_CHR_INC 0x64
#define DCSS_SCALER_COEF_VLUM 0x80
#define DCSS_SCALER_COEF_HLUM 0x140
#define DCSS_SCALER_COEF_VCHR 0x200
#define DCSS_SCALER_COEF_HCHR 0x300
struct dcss_scaler_ch {
void __iomem *base_reg;
u32 base_ofs;
struct dcss_scaler *scl;
u32 sdata_ctrl;
u32 scaler_ctrl;
bool scaler_ctrl_chgd;
u32 c_vstart;
u32 c_hstart;
int ch_num;
};
struct dcss_scaler {
struct device *dev;
struct dcss_ctxld *ctxld;
u32 ctx_id;
struct dcss_scaler_ch ch[3];
struct dcss_wrscl *wrscl;
struct dcss_rdsrc *rdsrc;
int ch_using_wrscl;
};
/* scaler coefficients generator */
#define PSC_FRAC_BITS 30
#define PSC_FRAC_SCALE BIT(PSC_FRAC_BITS)
#define PSC_BITS_FOR_PHASE 4
#define PSC_NUM_PHASES 16
#define PSC_STORED_PHASES (PSC_NUM_PHASES / 2 + 1)
#define PSC_NUM_TAPS 7
#define PSC_NUM_TAPS_RGBA 5
#define PSC_COEFF_PRECISION 10
#define PSC_PHASE_FRACTION_BITS 13
#define PSC_PHASE_MASK (PSC_NUM_PHASES - 1)
#define PSC_Q_FRACTION 19
#define PSC_Q_ROUND_OFFSET (1 << (PSC_Q_FRACTION - 1))
/**
* mult_q() - Performs fixed-point multiplication.
* @A: multiplier
* @B: multiplicand
*/
static int mult_q(int A, int B)
{
int result;
s64 temp;
temp = (int64_t)A * (int64_t)B;
temp += PSC_Q_ROUND_OFFSET;
result = (int)(temp >> PSC_Q_FRACTION);
return result;
}
/**
* div_q() - Performs fixed-point division.
* @A: dividend
* @B: divisor
*/
static int div_q(int A, int B)
{
int result;
s64 temp;
temp = (int64_t)A << PSC_Q_FRACTION;
if ((temp >= 0 && B >= 0) || (temp < 0 && B < 0))
temp += B / 2;
else
temp -= B / 2;
result = (int)(temp / B);
return result;
}
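/*
* mult_q() and div_q() work on Q19 fixed-point values
* (PSC_Q_FRACTION = 19, so 1.0 is 1 << 19 = 524288). For example,
* mult_q(786432, 1048576), i.e. 1.5 * 2.0, evaluates to 1572864 (3.0).
* div_q() with plain integer arguments returns the Q19 representation
* of the quotient, e.g. div_q(1, 16) = 32768 = 1/16.
*/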
/**
* exp_approx_q() - Compute approximation to exp(x) function using Taylor
* series.
* @x: fixed-point argument of exp function
*/
static int exp_approx_q(int x)
{
int sum = 1 << PSC_Q_FRACTION;
int term = 1 << PSC_Q_FRACTION;
term = mult_q(term, div_q(x, 1 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 2 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 3 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 4 << PSC_Q_FRACTION));
sum += term;
return sum;
}
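/*
* exp_approx_q() evaluates the truncated Taylor series
* exp(x) ~= 1 + x + x^2/2! + x^3/3! + x^4/4! incrementally: each step
* multiplies the previous term by x/n, so no explicit power or
* factorial is needed. In this driver it is only called with small
* negative arguments (|x| well below 1), where the truncation error is
* negligible.
*/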
/**
* dcss_scaler_gaussian_filter() - Generate gaussian prototype filter.
* @fc_q: fixed-point cutoff frequency normalized to range [0, 1]
* @use_5_taps: indicates whether to use 5 taps or 7 taps
* @phase0_identity: indicates whether to force an identity filter for phase 0
* @coef: output filter coefficients
*/
static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
bool phase0_identity,
int coef[][PSC_NUM_TAPS])
{
int sigma_q, g0_q, g1_q, g2_q;
int tap_cnt1, tap_cnt2, tap_idx, phase_cnt;
int mid;
int phase;
int i;
int taps;
if (use_5_taps)
for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
coef[phase][0] = 0;
coef[phase][PSC_NUM_TAPS - 1] = 0;
}
/* seed coefficient scanner */
taps = use_5_taps ? PSC_NUM_TAPS_RGBA : PSC_NUM_TAPS;
mid = (PSC_NUM_PHASES * taps) / 2 - 1;
phase_cnt = (PSC_NUM_PHASES * (PSC_NUM_TAPS + 1)) / 2;
tap_cnt1 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
tap_cnt2 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
/* seed gaussian filter generator */
sigma_q = div_q(PSC_Q_ROUND_OFFSET, fc_q);
g0_q = 1 << PSC_Q_FRACTION;
g1_q = exp_approx_q(div_q(-PSC_Q_ROUND_OFFSET,
mult_q(sigma_q, sigma_q)));
g2_q = mult_q(g1_q, g1_q);
coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = g0_q;
for (i = 0; i < mid; i++) {
phase_cnt++;
tap_cnt1--;
tap_cnt2++;
g0_q = mult_q(g0_q, g1_q);
g1_q = mult_q(g1_q, g2_q);
if ((phase_cnt & PSC_PHASE_MASK) <= 8) {
tap_idx = tap_cnt1 >> PSC_BITS_FOR_PHASE;
coef[phase_cnt & PSC_PHASE_MASK][tap_idx] = g0_q;
}
if (((-phase_cnt) & PSC_PHASE_MASK) <= 8) {
tap_idx = tap_cnt2 >> PSC_BITS_FOR_PHASE;
coef[(-phase_cnt) & PSC_PHASE_MASK][tap_idx] = g0_q;
}
}
phase_cnt++;
tap_cnt1--;
coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = 0;
/* override phase 0 with identity filter if specified */
if (phase0_identity)
for (i = 0; i < PSC_NUM_TAPS; i++)
coef[0][i] = i == (PSC_NUM_TAPS >> 1) ?
(1 << PSC_COEFF_PRECISION) : 0;
/* normalize coef */
for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
int sum = 0;
s64 ll_temp;
for (i = 0; i < PSC_NUM_TAPS; i++)
sum += coef[phase][i];
for (i = 0; i < PSC_NUM_TAPS; i++) {
ll_temp = coef[phase][i];
ll_temp <<= PSC_COEFF_PRECISION;
ll_temp += sum >> 1;
ll_temp /= sum;
coef[phase][i] = (int)ll_temp;
}
}
}
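/*
* Note on the generator above: it samples g(n) = exp(-n^2 / (2*sigma^2))
* without calling exp() per tap. With a = 1 / (2*sigma^2), consecutive
* samples satisfy g(n)/g(n-1) = exp(-(2n - 1) * a), and consecutive
* ratios differ by the constant factor g2_q = g1_q^2 = exp(-2a), so the
* loop only maintains the two running products g0_q and g1_q.
*/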
/**
* dcss_scaler_filter_design() - Compute filter coefficients using
* Gaussian filter.
* @src_length: length of input
* @dst_length: length of output
* @use_5_taps: 0 for 7 taps per phase, 1 for 5 taps
* @phase0_identity: indicates whether to force an identity filter for phase 0
* @coef: output coefficients
*/
static void dcss_scaler_filter_design(int src_length, int dst_length,
bool use_5_taps, bool phase0_identity,
int coef[][PSC_NUM_TAPS])
{
int fc_q;
/* compute cutoff frequency */
if (dst_length >= src_length)
fc_q = div_q(1, PSC_NUM_PHASES);
else
fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES);
/* compute gaussian filter coefficients */
dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
}
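/*
* Cutoff example (illustrative sizes): upscaling and 1:1 use
* fc_q = 1/16, one phase step; downscaling 3840 -> 1920 uses
* fc_q = 1920 / (3840 * 16) = 1/32, i.e. the cutoff is halved for a 2x
* downscale so the prototype filter suppresses frequencies that would
* otherwise alias.
*/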
static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs)
{
struct dcss_scaler *scl = ch->scl;
dcss_ctxld_write(scl->ctxld, scl->ctx_id, val, ch->base_ofs + ofs);
}
static int dcss_scaler_ch_init_all(struct dcss_scaler *scl,
unsigned long scaler_base)
{
struct dcss_scaler_ch *ch;
int i;
for (i = 0; i < 3; i++) {
ch = &scl->ch[i];
ch->base_ofs = scaler_base + i * 0x400;
ch->base_reg = devm_ioremap(scl->dev, ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(scl->dev, "scaler: unable to remap ch base\n");
return -ENOMEM;
}
ch->scl = scl;
ch->ch_num = i;
}
return 0;
}
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
{
struct dcss_scaler *scaler;
scaler = devm_kzalloc(dcss->dev, sizeof(*scaler), GFP_KERNEL);
if (!scaler)
return -ENOMEM;
dcss->scaler = scaler;
scaler->dev = dcss->dev;
scaler->ctxld = dcss->ctxld;
scaler->ctx_id = CTX_SB_HP;
scaler->wrscl = dcss->wrscl;
scaler->rdsrc = dcss->rdsrc;
scaler->ch_using_wrscl = -1;
if (dcss_scaler_ch_init_all(scaler, scaler_base)) {
int i;
for (i = 0; i < 3; i++) {
if (scaler->ch[i].base_reg)
devm_iounmap(scaler->dev,
scaler->ch[i].base_reg);
}
devm_kfree(scaler->dev, scaler);
return -ENOMEM;
}
return 0;
}
void dcss_scaler_exit(struct dcss_scaler *scl)
{
int ch_no;
for (ch_no = 0; ch_no < 3; ch_no++) {
struct dcss_scaler_ch *ch = &scl->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL);
if (ch->base_reg)
devm_iounmap(scl->dev, ch->base_reg);
}
devm_kfree(scl->dev, scl);
}
void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en)
{
struct dcss_scaler_ch *ch = &scl->ch[ch_num];
u32 scaler_ctrl;
if (scl->ch_using_wrscl == ch_num) {
if (en) {
scaler_ctrl = SCALE2MEM_EN | MEM2OFIFO_EN | REPEAT_EN;
} else {
dcss_wrscl_disable(scl->wrscl);
dcss_rdsrc_disable(scl->rdsrc);
scl->ch_using_wrscl = -1;
scaler_ctrl = 0;
}
} else {
scaler_ctrl = en ? SCALER_EN | REPEAT_EN : 0;
}
if (en)
dcss_scaler_write(ch, ch->sdata_ctrl, DCSS_SCALER_SDATA_CTRL);
if (ch->scaler_ctrl != scaler_ctrl)
ch->scaler_ctrl_chgd = true;
ch->scaler_ctrl = scaler_ctrl;
}
static void dcss_scaler_yuv_enable(struct dcss_scaler_ch *ch, bool en)
{
ch->sdata_ctrl &= ~YUV_EN;
ch->sdata_ctrl |= en ? YUV_EN : 0;
}
static void dcss_scaler_rtr_8lines_enable(struct dcss_scaler_ch *ch, bool en)
{
ch->sdata_ctrl &= ~RTRAM_8LINES;
ch->sdata_ctrl |= en ? RTRAM_8LINES : 0;
}
static void dcss_scaler_bit_depth_set(struct dcss_scaler_ch *ch, int depth)
{
u32 val;
val = depth == 30 ? 2 : 0;
dcss_scaler_write(ch,
((val << CHR_BIT_DEPTH_POS) & CHR_BIT_DEPTH_MASK) |
((val << LUM_BIT_DEPTH_POS) & LUM_BIT_DEPTH_MASK),
DCSS_SCALER_BIT_DEPTH);
}
enum buffer_format {
BUF_FMT_YUV420,
BUF_FMT_YUV422,
BUF_FMT_ARGB8888_YUV444,
};
enum chroma_location {
PSC_LOC_HORZ_0_VERT_1_OVER_4 = 0,
PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4 = 1,
PSC_LOC_HORZ_0_VERT_0 = 2,
PSC_LOC_HORZ_1_OVER_4_VERT_0 = 3,
PSC_LOC_HORZ_0_VERT_1_OVER_2 = 4,
PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2 = 5
};
static void dcss_scaler_format_set(struct dcss_scaler_ch *ch,
enum buffer_format src_fmt,
enum buffer_format dst_fmt)
{
dcss_scaler_write(ch, src_fmt, DCSS_SCALER_SRC_FORMAT);
dcss_scaler_write(ch, dst_fmt, DCSS_SCALER_DST_FORMAT);
}
static void dcss_scaler_res_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres,
int dst_xres, int dst_yres,
u32 pix_format, enum buffer_format dst_format)
{
u32 lsrc_xres, lsrc_yres, csrc_xres, csrc_yres;
u32 ldst_xres, ldst_yres, cdst_xres, cdst_yres;
bool src_is_444 = true;
lsrc_xres = src_xres;
csrc_xres = src_xres;
lsrc_yres = src_yres;
csrc_yres = src_yres;
ldst_xres = dst_xres;
cdst_xres = dst_xres;
ldst_yres = dst_yres;
cdst_yres = dst_yres;
if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY ||
pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) {
csrc_xres >>= 1;
src_is_444 = false;
} else if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21 ||
pix_format == DRM_FORMAT_NV12_10LE40) {
csrc_xres >>= 1;
csrc_yres >>= 1;
src_is_444 = false;
}
if (dst_format == BUF_FMT_YUV422)
cdst_xres >>= 1;
/* for 4:4:4 to 4:2:2 conversion, source height should be 1 less */
if (src_is_444 && dst_format == BUF_FMT_YUV422) {
lsrc_yres--;
csrc_yres--;
}
dcss_scaler_write(ch, (((lsrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((lsrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_SRC_LUM_RES);
dcss_scaler_write(ch, (((csrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((csrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_SRC_CHR_RES);
dcss_scaler_write(ch, (((ldst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((ldst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_DST_LUM_RES);
dcss_scaler_write(ch, (((cdst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((cdst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_DST_CHR_RES);
}
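/*
* Example for dcss_scaler_res_set() with an illustrative 1920x1080
* source: NV12 programs 1920x1080 luma and 960x540 chroma resolutions,
* while a packed 4:2:2 format such as YUYV only halves the chroma
* width (960x1080).
*/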
#define downscale_fp(factor, fp_pos) ((factor) << (fp_pos))
#define upscale_fp(factor, fp_pos) ((1 << (fp_pos)) / (factor))
struct dcss_scaler_factors {
int downscale;
int upscale;
};
static const struct dcss_scaler_factors dcss_scaler_factors[] = {
{3, 8}, {5, 8}, {5, 8},
};
static const struct dcss_scaler_factors dcss_scaler_wrscl_factors[] = {
{5, 8}, {7, 8}, {7, 8},
};
static bool dcss_scaler_fractions_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres,
int dst_xres, int dst_yres,
u32 src_format, u32 dst_format,
enum chroma_location src_chroma_loc)
{
int src_c_xres, src_c_yres, dst_c_xres, dst_c_yres;
u32 l_vinc, l_hinc, c_vinc, c_hinc;
u32 c_vstart, c_hstart;
u8 upscale_factor, downscale_factor;
src_c_xres = src_xres;
src_c_yres = src_yres;
dst_c_xres = dst_xres;
dst_c_yres = dst_yres;
c_vstart = 0;
c_hstart = 0;
/* adjustments for source chroma location */
if (src_format == BUF_FMT_YUV420) {
/* vertical input chroma position adjustment */
switch (src_chroma_loc) {
case PSC_LOC_HORZ_0_VERT_1_OVER_4:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
/*
* move chroma up to first luma line
* (1/4 chroma input line spacing)
*/
c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
break;
case PSC_LOC_HORZ_0_VERT_1_OVER_2:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
/*
* move chroma up to first luma line
* (1/2 chroma input line spacing)
*/
c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 1));
break;
default:
break;
}
/* horizontal input chroma position adjustment */
switch (src_chroma_loc) {
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
case PSC_LOC_HORZ_1_OVER_4_VERT_0:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
/* move chroma left 1/4 chroma input sample spacing */
c_hstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
break;
default:
break;
}
}
/* adjustments to chroma resolution */
if (src_format == BUF_FMT_YUV420) {
src_c_xres >>= 1;
src_c_yres >>= 1;
} else if (src_format == BUF_FMT_YUV422) {
src_c_xres >>= 1;
}
if (dst_format == BUF_FMT_YUV422)
dst_c_xres >>= 1;
l_vinc = ((src_yres << 13) + (dst_yres >> 1)) / dst_yres;
c_vinc = ((src_c_yres << 13) + (dst_c_yres >> 1)) / dst_c_yres;
l_hinc = ((src_xres << 13) + (dst_xres >> 1)) / dst_xres;
c_hinc = ((src_c_xres << 13) + (dst_c_xres >> 1)) / dst_c_xres;
/* save chroma start phase */
ch->c_vstart = c_vstart;
ch->c_hstart = c_hstart;
dcss_scaler_write(ch, 0, DCSS_SCALER_V_LUM_START);
dcss_scaler_write(ch, l_vinc, DCSS_SCALER_V_LUM_INC);
dcss_scaler_write(ch, 0, DCSS_SCALER_H_LUM_START);
dcss_scaler_write(ch, l_hinc, DCSS_SCALER_H_LUM_INC);
dcss_scaler_write(ch, c_vstart, DCSS_SCALER_V_CHR_START);
dcss_scaler_write(ch, c_vinc, DCSS_SCALER_V_CHR_INC);
dcss_scaler_write(ch, c_hstart, DCSS_SCALER_H_CHR_START);
dcss_scaler_write(ch, c_hinc, DCSS_SCALER_H_CHR_INC);
downscale_factor = dcss_scaler_factors[ch->ch_num].downscale;
upscale_factor = dcss_scaler_factors[ch->ch_num].upscale;
/* return whether the WR_SCL/RD_SRC loop-back path is needed to scale */
return l_vinc > downscale_fp(downscale_factor, 13) ||
l_vinc < upscale_fp(upscale_factor, 13) ||
l_hinc > downscale_fp(downscale_factor, 13) ||
l_hinc < upscale_fp(upscale_factor, 13);
}
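/*
* The increments above use 13-bit fractions (1 << 13 = 8192 is one
* source sample per output sample). Illustrative numbers: 1080 -> 2160
* lines gives l_vinc = 4096 (0.5), 2160 -> 1080 gives 16384 (2.0). The
* return value is true when an increment falls outside the channel's
* direct-scaling range, meaning the WR_SCL/RD_SRC loop-back path must
* assist.
*/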
int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
int *min, int *max)
{
const struct dcss_scaler_factors *factors_map = dcss_scaler_factors;
if (scl->ch_using_wrscl == -1 || scl->ch_using_wrscl == ch_num)
factors_map = dcss_scaler_wrscl_factors;
*min = upscale_fp(factors_map[ch_num].upscale, 16);
*max = downscale_fp(factors_map[ch_num].downscale, 16);
return 0;
}
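/*
* The ratios are reported in the 16.16 fixed-point form expected by
* drm_atomic_helper_check_plane_state(). For example, while the
* WR_SCL/RD_SRC path is free, channel 0 reports min = 65536 / 8 = 8192
* (8x upscale) and max = 5 << 16 = 327680 (5x downscale); once another
* channel owns WR_SCL, the tighter dcss_scaler_factors limits apply.
*/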
static void dcss_scaler_program_5_coef_set(struct dcss_scaler_ch *ch,
int base_addr,
int coef[][PSC_NUM_TAPS])
{
int i, phase;
for (i = 0; i < PSC_STORED_PHASES; i++) {
dcss_scaler_write(ch, ((coef[i][1] & 0xfff) << 16 |
(coef[i][2] & 0xfff) << 4 |
(coef[i][3] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][3] & 0x0ff) << 20 |
(coef[i][4] & 0xfff) << 8 |
(coef[i][5] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][5] & 0x00f) << 24),
base_addr + 0x80 + i * sizeof(u32));
}
/* reverse both phase and tap orderings */
for (phase = (PSC_NUM_PHASES >> 1) - 1;
i < PSC_NUM_PHASES; i++, phase--) {
dcss_scaler_write(ch, ((coef[phase][5] & 0xfff) << 16 |
(coef[phase][4] & 0xfff) << 4 |
(coef[phase][3] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][3] & 0x0ff) << 20 |
(coef[phase][2] & 0xfff) << 8 |
(coef[phase][1] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][1] & 0x00f) << 24),
base_addr + 0x80 + i * sizeof(u32));
}
}
static void dcss_scaler_program_7_coef_set(struct dcss_scaler_ch *ch,
int base_addr,
int coef[][PSC_NUM_TAPS])
{
int i, phase;
for (i = 0; i < PSC_STORED_PHASES; i++) {
dcss_scaler_write(ch, ((coef[i][0] & 0xfff) << 16 |
(coef[i][1] & 0xfff) << 4 |
(coef[i][2] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][2] & 0x0ff) << 20 |
(coef[i][3] & 0xfff) << 8 |
(coef[i][4] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][4] & 0x00f) << 24 |
(coef[i][5] & 0xfff) << 12 |
(coef[i][6] & 0xfff)),
base_addr + 0x80 + i * sizeof(u32));
}
/* reverse both phase and tap orderings */
for (phase = (PSC_NUM_PHASES >> 1) - 1;
i < PSC_NUM_PHASES; i++, phase--) {
dcss_scaler_write(ch, ((coef[phase][6] & 0xfff) << 16 |
(coef[phase][5] & 0xfff) << 4 |
(coef[phase][4] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][4] & 0x0ff) << 20 |
(coef[phase][3] & 0xfff) << 8 |
(coef[phase][2] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][2] & 0x00f) << 24 |
(coef[phase][1] & 0xfff) << 12 |
(coef[phase][0] & 0xfff)),
base_addr + 0x80 + i * sizeof(u32));
}
}
static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
enum buffer_format src_format,
enum buffer_format dst_format,
bool use_5_taps,
int src_xres, int src_yres, int dst_xres,
int dst_yres)
{
int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
bool program_5_taps = use_5_taps ||
(dst_format == BUF_FMT_YUV422 &&
src_format == BUF_FMT_ARGB8888_YUV444);
/* horizontal luma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
src_xres == dst_xres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical luma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
src_yres == dst_yres, coef);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
else
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
/* adjust chroma resolution */
if (src_format != BUF_FMT_ARGB8888_YUV444)
src_xres >>= 1;
if (src_format == BUF_FMT_YUV420)
src_yres >>= 1;
if (dst_format != BUF_FMT_ARGB8888_YUV444)
dst_xres >>= 1;
if (dst_format == BUF_FMT_YUV420) /* should not happen */
dst_yres >>= 1;
/* horizontal chroma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
(src_xres == dst_xres) && (ch->c_hstart == 0),
coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef);
/* vertical chroma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
(src_yres == dst_yres) && (ch->c_vstart == 0),
coef);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
else
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
}
static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres, int dst_xres,
int dst_yres)
{
int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
/* horizontal RGB */
dcss_scaler_filter_design(src_xres, dst_xres, false,
src_xres == dst_xres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical RGB */
dcss_scaler_filter_design(src_yres, dst_yres, false,
src_yres == dst_yres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
}
static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch,
const struct drm_format_info *format)
{
u32 a2r10g10b10_format;
if (format->is_yuv)
return;
ch->sdata_ctrl &= ~A2R10G10B10_FORMAT_MASK;
if (format->depth != 30)
return;
switch (format->format) {
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XRGB2101010:
a2r10g10b10_format = 0;
break;
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_XBGR2101010:
a2r10g10b10_format = 5;
break;
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_RGBX1010102:
a2r10g10b10_format = 6;
break;
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_BGRX1010102:
a2r10g10b10_format = 11;
break;
default:
a2r10g10b10_format = 0;
break;
}
ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS;
}
static void dcss_scaler_setup_path(struct dcss_scaler_ch *ch,
u32 pix_format, int dst_xres,
int dst_yres, u32 vrefresh_hz,
bool wrscl_needed)
{
struct dcss_scaler *scl = ch->scl;
u32 base_addr;
/* nothing to do if WRSCL path is needed but it's already used */
if (wrscl_needed && scl->ch_using_wrscl != -1 &&
scl->ch_using_wrscl != ch->ch_num)
return;
if (!wrscl_needed) {
/* Channel has finished using WRSCL. Release WRSCL/RDSRC. */
if (scl->ch_using_wrscl == ch->ch_num) {
dcss_wrscl_disable(scl->wrscl);
dcss_rdsrc_disable(scl->rdsrc);
scl->ch_using_wrscl = -1;
}
return;
}
base_addr = dcss_wrscl_setup(scl->wrscl, pix_format, vrefresh_hz,
dst_xres, dst_yres);
dcss_rdsrc_setup(scl->rdsrc, pix_format, dst_xres, dst_yres,
base_addr);
dcss_wrscl_enable(scl->wrscl);
dcss_rdsrc_enable(scl->rdsrc);
scl->ch_using_wrscl = ch->ch_num;
}
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
u32 vrefresh_hz)
{
struct dcss_scaler_ch *ch = &scl->ch[ch_num];
unsigned int pixel_depth = 0;
bool rtr_8line_en = false;
bool use_5_taps = false;
enum buffer_format src_format = BUF_FMT_ARGB8888_YUV444;
enum buffer_format dst_format = BUF_FMT_ARGB8888_YUV444;
u32 pix_format = format->format;
bool use_wrscl;
if (format->is_yuv) {
dcss_scaler_yuv_enable(ch, true);
if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21 ||
pix_format == DRM_FORMAT_NV12_10LE40) {
rtr_8line_en = true;
src_format = BUF_FMT_YUV420;
} else if (pix_format == DRM_FORMAT_UYVY ||
pix_format == DRM_FORMAT_VYUY ||
pix_format == DRM_FORMAT_YUYV ||
pix_format == DRM_FORMAT_YVYU) {
src_format = BUF_FMT_YUV422;
}
use_5_taps = !rtr_8line_en;
if (pix_format == DRM_FORMAT_NV12_10LE40)
pixel_depth = 30;
} else {
dcss_scaler_yuv_enable(ch, false);
pixel_depth = format->depth;
}
use_wrscl = dcss_scaler_fractions_set(ch, src_xres, src_yres, dst_xres,
dst_yres, src_format, dst_format,
PSC_LOC_HORZ_0_VERT_1_OVER_4);
if (format->is_yuv)
dcss_scaler_yuv_coef_set(ch, src_format, dst_format,
use_5_taps, src_xres, src_yres,
dst_xres, dst_yres);
else
dcss_scaler_rgb_coef_set(ch, src_xres, src_yres,
dst_xres, dst_yres);
dcss_scaler_rtr_8lines_enable(ch, rtr_8line_en);
dcss_scaler_bit_depth_set(ch, pixel_depth);
dcss_scaler_set_rgb10_order(ch, format);
dcss_scaler_format_set(ch, src_format, dst_format);
dcss_scaler_res_set(ch, src_xres, src_yres, dst_xres, dst_yres,
pix_format, dst_format);
dcss_scaler_setup_path(ch, pix_format, dst_xres,
dst_yres, vrefresh_hz, use_wrscl);
}
/* This function will be called from interrupt context. */
void dcss_scaler_write_sclctrl(struct dcss_scaler *scl)
{
int chnum;
for (chnum = 0; chnum < 3; chnum++) {
struct dcss_scaler_ch *ch = &scl->ch[chnum];
if (ch->scaler_ctrl_chgd) {
dcss_ctxld_write_irqsafe(scl->ctxld, scl->ctx_id,
ch->scaler_ctrl,
ch->base_ofs +
DCSS_SCALER_CTRL);
ch->scaler_ctrl_chgd = false;
}
}
}

View File

@ -0,0 +1,179 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include "dcss-dev.h"
#define DCSS_SS_SYS_CTRL 0x00
#define RUN_EN BIT(0)
#define DCSS_SS_DISPLAY 0x10
#define LRC_X_POS 0
#define LRC_X_MASK GENMASK(12, 0)
#define LRC_Y_POS 16
#define LRC_Y_MASK GENMASK(28, 16)
#define DCSS_SS_HSYNC 0x20
#define DCSS_SS_VSYNC 0x30
#define SYNC_START_POS 0
#define SYNC_START_MASK GENMASK(12, 0)
#define SYNC_END_POS 16
#define SYNC_END_MASK GENMASK(28, 16)
#define SYNC_POL BIT(31)
#define DCSS_SS_DE_ULC 0x40
#define ULC_X_POS 0
#define ULC_X_MASK GENMASK(12, 0)
#define ULC_Y_POS 16
#define ULC_Y_MASK GENMASK(28, 16)
#define ULC_POL BIT(31)
#define DCSS_SS_DE_LRC 0x50
#define DCSS_SS_MODE 0x60
#define PIPE_MODE_POS 0
#define PIPE_MODE_MASK GENMASK(1, 0)
#define DCSS_SS_COEFF 0x70
#define HORIZ_A_POS 0
#define HORIZ_A_MASK GENMASK(3, 0)
#define HORIZ_B_POS 4
#define HORIZ_B_MASK GENMASK(7, 4)
#define HORIZ_C_POS 8
#define HORIZ_C_MASK GENMASK(11, 8)
#define HORIZ_H_NORM_POS 12
#define HORIZ_H_NORM_MASK GENMASK(14, 12)
#define VERT_A_POS 16
#define VERT_A_MASK GENMASK(19, 16)
#define VERT_B_POS 20
#define VERT_B_MASK GENMASK(23, 20)
#define VERT_C_POS 24
#define VERT_C_MASK GENMASK(27, 24)
#define VERT_H_NORM_POS 28
#define VERT_H_NORM_MASK GENMASK(30, 28)
#define DCSS_SS_CLIP_CB 0x80
#define DCSS_SS_CLIP_CR 0x90
#define CLIP_MIN_POS 0
#define CLIP_MIN_MASK GENMASK(9, 0)
#define CLIP_MAX_POS 0
#define CLIP_MAX_MASK GENMASK(23, 16)
#define DCSS_SS_INTER_MODE 0xA0
#define INT_EN BIT(0)
#define VSYNC_SHIFT BIT(1)
struct dcss_ss {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
bool in_use;
};
static void dcss_ss_write(struct dcss_ss *ss, u32 val, u32 ofs)
{
if (!ss->in_use)
dcss_writel(val, ss->base_reg + ofs);
dcss_ctxld_write(ss->ctxld, ss->ctx_id, val,
ss->base_ofs + ofs);
}
int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
{
struct dcss_ss *ss;
ss = devm_kzalloc(dcss->dev, sizeof(*ss), GFP_KERNEL);
if (!ss)
return -ENOMEM;
dcss->ss = ss;
ss->dev = dcss->dev;
ss->ctxld = dcss->ctxld;
ss->base_reg = devm_ioremap(dcss->dev, ss_base, SZ_4K);
if (!ss->base_reg) {
dev_err(dcss->dev, "ss: unable to remap ss base\n");
devm_kfree(ss->dev, ss);
return -ENOMEM;
}
ss->base_ofs = ss_base;
ss->ctx_id = CTX_SB_HP;
return 0;
}
void dcss_ss_exit(struct dcss_ss *ss)
{
/* stop SS */
dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
if (ss->base_reg)
devm_iounmap(ss->dev, ss->base_reg);
devm_kfree(ss->dev, ss);
}
void dcss_ss_subsam_set(struct dcss_ss *ss, bool out_is_yuv)
{
dcss_ss_write(ss, out_is_yuv ? 0x21612161 : 0x41614161, DCSS_SS_COEFF);
dcss_ss_write(ss, out_is_yuv ? 2 : 0, DCSS_SS_MODE);
dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CB);
dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CR);
}
void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
bool phsync, bool pvsync)
{
u16 lrc_x, lrc_y;
u16 hsync_start, hsync_end;
u16 vsync_start, vsync_end;
u16 de_ulc_x, de_ulc_y;
u16 de_lrc_x, de_lrc_y;
lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
vm->vactive - 1;
dcss_ss_write(ss, (lrc_y << LRC_Y_POS) | lrc_x, DCSS_SS_DISPLAY);
hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
hsync_end = vm->hsync_len - 1;
dcss_ss_write(ss, (phsync ? SYNC_POL : 0) |
((u32)hsync_end << SYNC_END_POS) | hsync_start,
DCSS_SS_HSYNC);
vsync_start = vm->vfront_porch - 1;
vsync_end = vm->vfront_porch + vm->vsync_len - 1;
dcss_ss_write(ss, (pvsync ? SYNC_POL : 0) |
((u32)vsync_end << SYNC_END_POS) | vsync_start,
DCSS_SS_VSYNC);
de_ulc_x = vm->hsync_len + vm->hback_porch - 1;
de_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch;
dcss_ss_write(ss, SYNC_POL | ((u32)de_ulc_y << ULC_Y_POS) | de_ulc_x,
DCSS_SS_DE_ULC);
de_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
de_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
vm->vactive - 1;
dcss_ss_write(ss, (de_lrc_y << LRC_Y_POS) | de_lrc_x, DCSS_SS_DE_LRC);
}
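/*
* Worked example for dcss_ss_sync_set(), assuming standard CEA
* 1920x1080@60 timings (hfp 88, hsync 44, hbp 148, vfp 4, vsync 5,
* vbp 36): LRC = (2199, 1124), HSYNC start/end = 2199/43, VSYNC
* start/end = 3/8, DE ULC = (191, 45), DE LRC = (2111, 1124).
*/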
void dcss_ss_enable(struct dcss_ss *ss)
{
dcss_ss_write(ss, RUN_EN, DCSS_SS_SYS_CTRL);
ss->in_use = true;
}
void dcss_ss_disable(struct dcss_ss *ss)
{
dcss_ss_write(ss, 0, DCSS_SS_SYS_CTRL);
ss->in_use = false;
}

View File

@ -0,0 +1,158 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include "dcss-dev.h"
#define DCSS_WRSCL_CTRL_STATUS 0x00
#define WRSCL_ERR BIT(31)
#define WRSCL_ERR_EN BIT(30)
#define WRSCL_FRAME_COMP BIT(29)
#define WRSCL_FRAME_COMP_EN BIT(28)
#define WRSCL_FIFO_SIZE_POS 18
#define WRSCL_FIFO_SIZE_MASK GENMASK(24, 18)
#define WRSCL_P_FREQ_POS 10
#define WRSCL_P_FREQ_MASK GENMASK(17, 10)
#define WRSCL_P_SIZE_POS 7
#define WRSCL_P_SIZE_MASK GENMASK(9, 7)
#define WRSCL_T_SIZE_POS 5
#define WRSCL_T_SIZE_MASK GENMASK(6, 5)
#define WRSCL_BPP_POS 2
#define WRSCL_BPP_MASK GENMASK(4, 2)
#define WRSCL_REPEAT BIT(1)
#define WRSCL_ENABLE BIT(0)
#define DCSS_WRSCL_BASE_ADDR 0x10
#define DCSS_WRSCL_PITCH 0x14
struct dcss_wrscl {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
u32 buf_size;
u32 buf_addr;
void *buf_vaddr;
struct clk *bclk;
u32 ctrl_status;
};
static void dcss_wrscl_write(struct dcss_wrscl *wrscl, u32 val, u32 ofs)
{
dcss_ctxld_write(wrscl->ctxld, wrscl->ctx_id,
val, wrscl->base_ofs + ofs);
}
int dcss_wrscl_init(struct dcss_dev *dcss, unsigned long wrscl_base)
{
struct dcss_wrscl *wrscl;
wrscl = devm_kzalloc(dcss->dev, sizeof(*wrscl), GFP_KERNEL);
if (!wrscl)
return -ENOMEM;
wrscl->base_reg = devm_ioremap(dcss->dev, wrscl_base, SZ_4K);
if (!wrscl->base_reg) {
dev_err(dcss->dev, "wrscl: unable to remap base\n");
devm_kfree(dcss->dev, wrscl);
return -ENOMEM;
}
dcss->wrscl = wrscl;
wrscl->dev = dcss->dev;
wrscl->base_ofs = wrscl_base;
wrscl->ctxld = dcss->ctxld;
wrscl->ctx_id = CTX_SB_HP;
wrscl->bclk = dcss->axi_clk;
return 0;
}
void dcss_wrscl_exit(struct dcss_wrscl *wrscl)
{
devm_iounmap(wrscl->dev, wrscl->base_reg);
devm_kfree(wrscl->dev, wrscl);
}
static const u16 dcss_wrscl_psize_map[] = {64, 128, 256, 512, 1024, 2048, 4096};
u32 dcss_wrscl_setup(struct dcss_wrscl *wrscl, u32 pix_format, u32 vrefresh_hz,
u32 dst_xres, u32 dst_yres)
{
u32 pitch, p_size, p_freq, bpp;
dma_addr_t dma_handle;
u32 bclk_rate = clk_get_rate(wrscl->bclk);
/* release any previously allocated buffer before setting up a new one */
if (wrscl->buf_addr)
dmam_free_coherent(wrscl->dev, wrscl->buf_size,
wrscl->buf_vaddr, wrscl->buf_addr);
p_size = PSIZE_256;
/* scaler output is YUV444 */
bpp = 4;
/* spread the load over the entire frame */
p_freq = ((u64)bclk_rate * dcss_wrscl_psize_map[p_size]) /
((u64)dst_xres * dst_yres * vrefresh_hz * bpp * 8);
/* choose a slightly smaller p_freq */
p_freq = p_freq - 3 > 255 ? 255 : p_freq - 3;
wrscl->ctrl_status = FIFO_512 << WRSCL_FIFO_SIZE_POS;
wrscl->ctrl_status |= p_size << WRSCL_P_SIZE_POS;
wrscl->ctrl_status |= TSIZE_256 << WRSCL_T_SIZE_POS;
wrscl->ctrl_status |= BPP_32_10BIT_OUTPUT << WRSCL_BPP_POS;
wrscl->ctrl_status |= p_freq << WRSCL_P_FREQ_POS;
wrscl->buf_size = dst_xres * dst_yres * bpp;
pitch = dst_xres * bpp;
wrscl->buf_vaddr = dmam_alloc_coherent(wrscl->dev, wrscl->buf_size,
&dma_handle, GFP_KERNEL);
if (!wrscl->buf_vaddr) {
dev_err(wrscl->dev, "wrscl: cannot alloc buf mem\n");
return 0;
}
wrscl->buf_addr = dma_handle;
dcss_wrscl_write(wrscl, wrscl->buf_addr, DCSS_WRSCL_BASE_ADDR);
dcss_wrscl_write(wrscl, pitch, DCSS_WRSCL_PITCH);
return wrscl->buf_addr;
}
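/*
* Illustrative p_freq arithmetic (the AXI rate is an assumed example,
* not a value taken from this driver): with an 800 MHz bus clock,
* 256-byte packets and a 3840x2160@60 destination at 4 bytes/pixel,
* p_freq = 800000000 * 256 / (3840 * 2160 * 60 * 4 * 8) = 12, which
* the "slightly smaller" adjustment above then reduces to 9.
*/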
void dcss_wrscl_enable(struct dcss_wrscl *wrscl)
{
wrscl->ctrl_status |= WRSCL_ENABLE | WRSCL_REPEAT;
dcss_wrscl_write(wrscl, wrscl->ctrl_status, DCSS_WRSCL_CTRL_STATUS);
}
void dcss_wrscl_disable(struct dcss_wrscl *wrscl)
{
wrscl->ctrl_status &= ~(WRSCL_ENABLE | WRSCL_REPEAT);
dcss_wrscl_write(wrscl, wrscl->ctrl_status, DCSS_WRSCL_CTRL_STATUS);
if (wrscl->buf_addr) {
dmam_free_coherent(wrscl->dev, wrscl->buf_size,
wrscl->buf_vaddr, wrscl->buf_addr);
wrscl->buf_addr = 0;
}
}

View File

@ -0,0 +1,77 @@
/*
* Copyright 2012-2016 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARCH_MXC_BUSFREQ_H__
#define __ASM_ARCH_MXC_BUSFREQ_H__
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
/*
* This enumerates busfreq low power mode entry and exit.
*/
enum busfreq_event {
LOW_BUSFREQ_ENTER,
LOW_BUSFREQ_EXIT,
};
/*
* This enumerates the system bus and ddr frequencies in various modes.
* BUS_FREQ_HIGH - DDR @ 528MHz, AHB @ 132MHz.
* BUS_FREQ_MED - DDR @ 400MHz, AHB @ 132MHz.
* BUS_FREQ_AUDIO - DDR @ 50MHz/100MHz, AHB @ 24MHz.
* BUS_FREQ_LOW - DDR @ 24MHz, AHB @ 24MHz.
* BUS_FREQ_ULTRA_LOW - DDR @ 1MHz, AHB @ 3MHz.
*
* Drivers need to request/release the bus/ddr frequencies based on
* their performance requirements. Drivers cannot request/release
* BUS_FREQ_ULTRA_LOW mode as this mode is automatically entered from
* either BUS_FREQ_AUDIO or BUS_FREQ_LOW modes.
*/
enum bus_freq_mode {
BUS_FREQ_HIGH,
BUS_FREQ_MED,
BUS_FREQ_AUDIO,
BUS_FREQ_LOW,
BUS_FREQ_ULTRA_LOW,
};
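/*
* Minimal usage sketch (illustrative only, not code from this tree):
*
*	request_bus_freq(BUS_FREQ_HIGH);
*	... do the bandwidth-heavy work ...
*	release_bus_freq(BUS_FREQ_HIGH);
*
* Each request is expected to be balanced by a release of the same
* mode.
*/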
#if defined(CONFIG_HAVE_IMX_BUSFREQ) && !defined(CONFIG_ARM64)
extern struct regulator *arm_reg;
extern struct regulator *soc_reg;
void request_bus_freq(enum bus_freq_mode mode);
void release_bus_freq(enum bus_freq_mode mode);
int register_busfreq_notifier(struct notifier_block *nb);
int unregister_busfreq_notifier(struct notifier_block *nb);
int get_bus_freq_mode(void);
#elif defined(CONFIG_HAVE_IMX_BUSFREQ)
void request_bus_freq(enum bus_freq_mode mode);
void release_bus_freq(enum bus_freq_mode mode);
int get_bus_freq_mode(void);
#else
static inline void request_bus_freq(enum bus_freq_mode mode)
{
}
static inline void release_bus_freq(enum bus_freq_mode mode)
{
}
static inline int register_busfreq_notifier(struct notifier_block *nb)
{
return 0;
}
static inline int unregister_busfreq_notifier(struct notifier_block *nb)
{
return 0;
}
static inline int get_bus_freq_mode(void)
{
return BUS_FREQ_HIGH;
}
#endif
#endif