2018-08-03 14:29:19 +09:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/*
|
|
|
|
* Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/clk.h>
|
2020-03-04 19:31:04 +09:00
|
|
|
#include <linux/firmware/imx/ipc.h>
|
2018-08-03 14:29:19 +09:00
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/mailbox_controller.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/of_device.h>
|
MLK-23780 mailbox: imx: Support runtime PM
Some power hungry sub-systems like VPU has its own MUs which also
use mailbox driver, current mailbox driver uses platform driver
model and MU's power will be ON after driver probed and left ON
there, it may cause the whole sub-system can NOT enter lower power
mode, take VPU driver for example, it has runtime PM support, but
due to its MU always ON, the VPU sub-system will be always ON and
consume many power during kernel idle.
To save power in kernel idle, mailbox driver needs to support
runtime PM in order to power off MU when it is unused. However,
the runtime suspend/resume can ONLY be implemented in mailbox's
.shutdown/.startup callback, so its consumer needs to call
mbox_request_channel()/mbox_free_channel() in consumer driver's
runtime PM callback, then the MU's power will be ON/OFF along with
consumer's runtime PM status.
For those consumers never call mbox_free_channel(), MU's power will
be still ON always, if there is obvious power increase observed, we
will request the consumer to free mailbox channel to save power.
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
Reviewed-by: Jacky Bai <ping.bai@nxp.com>
2020-04-13 15:52:41 +09:00
|
|
|
#include <linux/pm_runtime.h>
|
2018-08-03 14:29:19 +09:00
|
|
|
#include <linux/slab.h>
|
2020-03-26 19:18:28 +09:00
|
|
|
#include <linux/jiffies.h>
|
2018-08-03 14:29:19 +09:00
|
|
|
|
|
|
|
#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
|
|
|
|
#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x)))
|
|
|
|
#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x)))
|
|
|
|
#define IMX_MU_xSR_BRDIP BIT(9)
|
|
|
|
|
|
|
|
/* General Purpose Interrupt Enable */
|
|
|
|
#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x)))
|
|
|
|
/* Receive Interrupt Enable */
|
|
|
|
#define IMX_MU_xCR_RIEn(x) BIT(24 + (3 - (x)))
|
|
|
|
/* Transmit Interrupt Enable */
|
|
|
|
#define IMX_MU_xCR_TIEn(x) BIT(20 + (3 - (x)))
|
|
|
|
/* General Purpose Interrupt Request */
|
|
|
|
#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
|
|
|
|
|
|
|
|
#define IMX_MU_CHANS 16
|
|
|
|
#define IMX_MU_CHAN_NAME_SIZE 20
|
|
|
|
|
2020-03-26 19:18:28 +09:00
|
|
|
#define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
|
|
|
|
#define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
/*
 * Channel type, encoded in the first devicetree cell of a client's
 * mailbox specifier (see imx_mu_xlate()).
 */
enum imx_mu_chan_type {
	IMX_MU_TYPE_TX,		/* Tx */
	IMX_MU_TYPE_RX,		/* Rx */
	IMX_MU_TYPE_TXDB,	/* Tx doorbell */
	IMX_MU_TYPE_RXDB,	/* Rx doorbell */
};
|
|
|
|
|
2020-03-04 19:31:04 +09:00
|
|
|
/*
 * Largest SCU RPC message handled by this driver: header plus up to
 * 30 payload words. NOTE(review): hdr.size appears to be counted in
 * 32-bit words including the header (see the byte_size checks in the
 * SECO tx/rxdb paths) — confirm against the SCU firmware ABI.
 */
struct imx_sc_rpc_msg_max {
	struct imx_sc_rpc_msg hdr;
	u32 data[30];
};
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
/* Per-channel (connection) private state. */
struct imx_mu_con_priv {
	unsigned int idx;		/* register/bit index within the type (0..3) */
	char irq_desc[IMX_MU_CHAN_NAME_SIZE];	/* name passed to request_irq() */
	enum imx_mu_chan_type type;
	struct mbox_chan *chan;		/* back-pointer to the owning mbox channel */
	struct tasklet_struct txdb_tasklet;	/* reports txdone for doorbell channels */
};
|
|
|
|
|
|
|
|
/* Per-MU-instance driver state, one per probed device. */
struct imx_mu_priv {
	struct device *dev;
	void __iomem *base;		/* MU register block */
	spinlock_t xcr_lock;		/* control register lock */

	struct mbox_controller mbox;
	struct mbox_chan mbox_chans[IMX_MU_CHANS];

	struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
	const struct imx_mu_dcfg *dcfg;	/* SoC-specific config (match data) */
	struct clk *clk;		/* optional; NULL when not provided */
	int irq;			/* single IRQ shared by all channels */

	/* for control register save and restore */
	u32 xcr;

	bool side_b;			/* "fsl,mu-side-b": skip default xCR init */
};
|
|
|
|
|
2020-03-04 14:49:35 +09:00
|
|
|
/*
 * SoC/firmware-specific configuration: transport callbacks plus the
 * register offsets of this MU variant.
 */
struct imx_mu_dcfg {
	int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
		  void *data);
	int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	void (*init)(struct imx_mu_priv *priv);
	u32 xTR[4];		/* Transmit Registers */
	u32 xRR[4];		/* Receive Registers */
	u32 xSR;		/* Status Register */
	u32 xCR;		/* Control Register */
};
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
|
|
|
|
{
|
|
|
|
return container_of(mbox, struct imx_mu_priv, mbox);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Write @val to the MU register at byte offset @offs. */
static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
{
	void __iomem *reg = priv->base + offs;

	iowrite32(val, reg);
}
|
|
|
|
|
|
|
|
/* Read the MU register at byte offset @offs. */
static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
{
	void __iomem *reg = priv->base + offs;

	return ioread32(reg);
}
|
|
|
|
|
2020-03-04 19:31:04 +09:00
|
|
|
/*
 * Write one word to TR[idx % 4], first busy-waiting (up to
 * IMX_MU_SECO_TX_TOUT) until the remote side has drained the slot
 * (TE bit set in the status register).
 *
 * Returns 0 on success, -ETIME if the slot never became empty.
 */
static int imx_mu_tx_waiting_write(struct imx_mu_priv *priv, u32 idx, u32 val)
{
	u64 deadline = get_jiffies_64() + IMX_MU_SECO_TX_TOUT;
	u32 sr;

	dev_dbg(priv->dev, "Trying to write %.8x to idx %d\n", val, idx);

	for (;;) {
		sr = imx_mu_read(priv, priv->dcfg->xSR);
		if (sr & IMX_MU_xSR_TEn(idx % 4))
			break;
		if (!time_is_after_jiffies64(deadline)) {
			dev_err(priv->dev, "timeout trying to write %.8x at %d(%.8x)\n",
				val, idx, sr);
			return -ETIME;
		}
	}

	imx_mu_write(priv, val, priv->dcfg->xTR[idx % 4]);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Read one word from RR[idx % 4], first busy-waiting (up to
 * IMX_MU_SECO_RX_TOUT) until the remote side has filled the slot
 * (RF bit set in the status register).
 *
 * Returns 0 on success (word stored in *val), -ETIME on timeout.
 */
static int imx_mu_rx_waiting_read(struct imx_mu_priv *priv, u32 idx, u32 *val)
{
	u64 deadline = get_jiffies_64() + IMX_MU_SECO_RX_TOUT;
	u32 sr;

	dev_dbg(priv->dev, "Trying to read from idx %d\n", idx);

	for (;;) {
		sr = imx_mu_read(priv, priv->dcfg->xSR);
		if (sr & IMX_MU_xSR_RFn(idx % 4))
			break;
		if (!time_is_after_jiffies64(deadline)) {
			dev_err(priv->dev, "timeout trying to read idx %d (%.8x)\n",
				idx, sr);
			return -ETIME;
		}
	}

	*val = imx_mu_read(priv, priv->dcfg->xRR[idx % 4]);
	dev_dbg(priv->dev, "Read %.8x\n", *val);

	return 0;
}
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
/*
 * Atomically read-modify-write the control register: clear the bits in
 * @clr, then set the bits in @set. Returns the value written. The lock
 * is IRQ-safe since the ISR also updates xCR through this helper.
 */
static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
{
	unsigned long flags;
	u32 xcr;

	spin_lock_irqsave(&priv->xcr_lock, flags);
	xcr = imx_mu_read(priv, priv->dcfg->xCR);
	xcr = (xcr & ~clr) | set;
	imx_mu_write(priv, xcr, priv->dcfg->xCR);
	spin_unlock_irqrestore(&priv->xcr_lock, flags);

	return xcr;
}
|
|
|
|
|
2020-03-04 14:49:35 +09:00
|
|
|
static int imx_mu_generic_tx(struct imx_mu_priv *priv,
|
|
|
|
struct imx_mu_con_priv *cp,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
u32 *arg = data;
|
|
|
|
|
|
|
|
switch (cp->type) {
|
|
|
|
case IMX_MU_TYPE_TX:
|
|
|
|
imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
|
|
|
|
imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
|
|
|
|
break;
|
|
|
|
case IMX_MU_TYPE_TXDB:
|
|
|
|
imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
|
|
|
|
tasklet_schedule(&cp->txdb_tasklet);
|
|
|
|
break;
|
|
|
|
default:
|
2020-03-04 19:31:04 +09:00
|
|
|
dev_warn_ratelimited(priv->dev,
|
|
|
|
"Send data on wrong channel type: %d\n",
|
|
|
|
cp->type);
|
2020-03-04 14:49:35 +09:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int imx_mu_generic_rx(struct imx_mu_priv *priv,
|
|
|
|
struct imx_mu_con_priv *cp)
|
|
|
|
{
|
|
|
|
u32 dat;
|
|
|
|
|
|
|
|
dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
|
|
|
|
mbox_chan_received_data(cp->chan, (void *)&dat);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-04 19:31:04 +09:00
|
|
|
/*
 * Generic doorbell receive: acknowledge the general-purpose interrupt
 * (write-1-to-clear GIP in the status register), then notify the
 * client with no payload.
 */
static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
			       struct imx_mu_con_priv *cp)
{
	imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
	mbox_chan_received_data(cp->chan, NULL);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * SECO transmit: send a whole RPC message (struct imx_sc_rpc_msg_max)
 * over a doorbell channel. The first word (header) goes out before the
 * doorbell is rung; the next up-to-3 words fill the remaining TR slots;
 * anything beyond the 4 hardware slots is written with a busy-wait for
 * the remote side to drain each slot. The exact ordering of header,
 * doorbell and payload writes is part of the SECO protocol — do not
 * reorder.
 */
static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
			  void *data)
{
	struct imx_sc_rpc_msg_max *msg = data;
	u32 *arg = data;
	u32 byte_size;
	int err;
	int i;

	dev_dbg(priv->dev, "Sending message\n");

	switch (cp->type) {
	case IMX_MU_TYPE_TXDB:
		byte_size = msg->hdr.size * sizeof(u32);
		if (byte_size > sizeof(*msg)) {
			/*
			 * The real message size can be different to
			 * struct imx_sc_rpc_msg_max size
			 */
			dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), byte_size);
			return -EINVAL;
		}

		print_hex_dump_debug("from client ", DUMP_PREFIX_OFFSET, 4, 4,
				     data, byte_size, false);

		/* Send first word */
		dev_dbg(priv->dev, "Sending header\n");
		imx_mu_write(priv, *arg++, priv->dcfg->xTR[0]);

		/* Send signaling */
		dev_dbg(priv->dev, "Sending signaling\n");
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);

		/* Send words to fill the mailbox */
		for (i = 1; i < 4 && i < msg->hdr.size; i++) {
			dev_dbg(priv->dev, "Sending word %d\n", i);
			imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
		}

		/* Send rest of message waiting for remote read */
		for (; i < msg->hdr.size; i++) {
			dev_dbg(priv->dev, "Sending word %d\n", i);
			err = imx_mu_tx_waiting_write(priv, i, *arg++);
			if (err) {
				dev_err(priv->dev, "Timeout tx %d\n", i);
				return err;
			}
		}

		/* Simulate hack for mbox framework */
		tasklet_schedule(&cp->txdb_tasklet);

		break;
	default:
		dev_warn_ratelimited(priv->dev,
				     "Send data on wrong channel type: %d\n",
				     cp->type);
		return -EINVAL;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * SECO doorbell receive: assemble a full RPC message after a GIP
 * interrupt. The header is read from RR[0] first to learn the message
 * length, then the remaining words are polled as the remote side
 * writes them. On any failure the client is notified with ERR_PTR(err)
 * instead of a message.
 */
static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
	struct imx_sc_rpc_msg_max msg;
	u32 *data = (u32 *)&msg;
	u32 byte_size;
	int err = 0;
	int i;

	dev_dbg(priv->dev, "Receiving message\n");

	/* Read header */
	dev_dbg(priv->dev, "Receiving header\n");
	*data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
	byte_size = msg.hdr.size * sizeof(u32);
	if (byte_size > sizeof(msg)) {
		dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), byte_size);
		err = -EINVAL;
		goto error;
	}

	/* Read message waiting they are written */
	for (i = 1; i < msg.hdr.size; i++) {
		dev_dbg(priv->dev, "Receiving word %d\n", i);
		err = imx_mu_rx_waiting_read(priv, i, data++);
		if (err) {
			dev_err(priv->dev, "Timeout rx %d\n", i);
			goto error;
		}
	}

	/* Clear GIP */
	imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);

	print_hex_dump_debug("to client ", DUMP_PREFIX_OFFSET, 4, 4,
			     &msg, byte_size, false);

	/* send data to client */
	dev_dbg(priv->dev, "Sending message to client\n");
	mbox_chan_received_data(cp->chan, (void *)&msg);

	goto exit;

error:
	mbox_chan_received_data(cp->chan, ERR_PTR(err));

exit:
	return err;
}
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
/*
 * Tasklet backing TXDB channels: doorbells have no hardware ACK, so
 * transmission is reported complete from softirq context right after
 * the doorbell was rung (scheduled from the tx path).
 */
static void imx_mu_txdb_tasklet(unsigned long data)
{
	struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;

	mbox_chan_txdone(cp->chan, 0);
}
|
|
|
|
|
|
|
|
/*
 * Shared interrupt handler, registered once per active channel. Each
 * invocation masks the status register down to the single bit relevant
 * for its own channel (and only if the matching enable bit is set in
 * the control register), dispatches the event, and otherwise returns
 * IRQ_NONE so the other sharers of the line get a chance.
 *
 * NOTE(review): the "status & (ctrl & enable)" masking relies on the
 * status and interrupt-enable bits for a given channel occupying the
 * same bit positions in xSR and xCR (they do per the macros at the top
 * of this file) — confirm if new register layouts are added.
 */
static irqreturn_t imx_mu_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	u32 val, ctrl;

	ctrl = imx_mu_read(priv, priv->dcfg->xCR);
	val = imx_mu_read(priv, priv->dcfg->xSR);

	dev_dbg(priv->dev, "isr: status: %.8x ctrl: %.8x\n", val, ctrl);

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		val &= IMX_MU_xSR_TEn(cp->idx) &
			(ctrl & IMX_MU_xCR_TIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		val &= IMX_MU_xSR_RFn(cp->idx) &
			(ctrl & IMX_MU_xCR_RIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		val &= IMX_MU_xSR_GIPn(cp->idx) &
			(ctrl & IMX_MU_xCR_GIEn(cp->idx));
		break;
	default:
		break;
	}

	if (!val)
		return IRQ_NONE;

	if (val == IMX_MU_xSR_TEn(cp->idx)) {
		/* TX slot drained: stop the interrupt and report txdone */
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
		mbox_chan_txdone(chan, 0);
	} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
		priv->dcfg->rx(priv, cp);
	} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
		priv->dcfg->rxdb(priv, cp);
	} else {
		dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
|
|
|
|
|
|
|
|
static int imx_mu_send_data(struct mbox_chan *chan, void *data)
|
|
|
|
{
|
|
|
|
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
|
|
|
|
struct imx_mu_con_priv *cp = chan->con_priv;
|
|
|
|
|
2020-03-04 14:49:35 +09:00
|
|
|
return priv->dcfg->tx(priv, cp, data);
|
2018-08-03 14:29:19 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
static int imx_mu_startup(struct mbox_chan *chan)
|
|
|
|
{
|
|
|
|
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
|
|
|
|
struct imx_mu_con_priv *cp = chan->con_priv;
|
2020-05-07 20:57:55 +09:00
|
|
|
unsigned long irq_flag = IRQF_SHARED;
|
2018-08-03 14:29:19 +09:00
|
|
|
int ret;
|
|
|
|
|
MLK-23780 mailbox: imx: Support runtime PM
Some power hungry sub-systems like VPU has its own MUs which also
use mailbox driver, current mailbox driver uses platform driver
model and MU's power will be ON after driver probed and left ON
there, it may cause the whole sub-system can NOT enter lower power
mode, take VPU driver for example, it has runtime PM support, but
due to its MU always ON, the VPU sub-system will be always ON and
consume many power during kernel idle.
To save power in kernel idle, mailbox driver needs to support
runtime PM in order to power off MU when it is unused. However,
the runtime suspend/resume can ONLY be implemented in mailbox's
.shutdown/.startup callback, so its consumer needs to call
mbox_request_channel()/mbox_free_channel() in consumer driver's
runtime PM callback, then the MU's power will be ON/OFF along with
consumer's runtime PM status.
For those consumers never call mbox_free_channel(), MU's power will
be still ON always, if there is obvious power increase observed, we
will request the consumer to free mailbox channel to save power.
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
Reviewed-by: Jacky Bai <ping.bai@nxp.com>
2020-04-13 15:52:41 +09:00
|
|
|
pm_runtime_get_sync(priv->dev);
|
2018-08-03 14:29:19 +09:00
|
|
|
if (cp->type == IMX_MU_TYPE_TXDB) {
|
|
|
|
/* Tx doorbell don't have ACK support */
|
|
|
|
tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
|
|
|
|
(unsigned long)cp);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-05-07 20:57:55 +09:00
|
|
|
/* IPC MU should be with IRQF_NO_SUSPEND set */
|
|
|
|
if (!priv->dev->pm_domain)
|
|
|
|
irq_flag |= IRQF_NO_SUSPEND;
|
|
|
|
|
|
|
|
ret = request_irq(priv->irq, imx_mu_isr, irq_flag,
|
|
|
|
cp->irq_desc, chan);
|
2018-08-03 14:29:19 +09:00
|
|
|
if (ret) {
|
|
|
|
dev_err(priv->dev,
|
|
|
|
"Unable to acquire IRQ %d\n", priv->irq);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (cp->type) {
|
|
|
|
case IMX_MU_TYPE_RX:
|
|
|
|
imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0);
|
|
|
|
break;
|
|
|
|
case IMX_MU_TYPE_RXDB:
|
|
|
|
imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * mbox .shutdown callback: undo imx_mu_startup(). For doorbell TX
 * channels only the tasklet needs killing; for the others the channel's
 * interrupt-enable bit is cleared before the shared IRQ is freed. In
 * every path the runtime PM reference taken at startup is dropped last,
 * allowing the MU power domain to turn off.
 */
static void imx_mu_shutdown(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;

	if (cp->type == IMX_MU_TYPE_TXDB) {
		tasklet_kill(&cp->txdb_tasklet);
		pm_runtime_put_sync(priv->dev);
		return;
	}

	/* Disable this channel's interrupt source before releasing the IRQ */
	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
		break;
	default:
		break;
	}

	free_irq(priv->irq, chan);
	pm_runtime_put_sync(priv->dev);
}
|
|
|
|
|
|
|
|
/* mbox framework callbacks; txdone is signalled from the ISR/tasklet
 * (priv->mbox.txdone_irq is set in probe). */
static const struct mbox_chan_ops imx_mu_ops = {
	.send_data = imx_mu_send_data,
	.startup = imx_mu_startup,
	.shutdown = imx_mu_shutdown,
};
|
|
|
|
|
|
|
|
static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
|
|
|
|
const struct of_phandle_args *sp)
|
|
|
|
{
|
|
|
|
u32 type, idx, chan;
|
|
|
|
|
|
|
|
if (sp->args_count != 2) {
|
2020-03-04 19:31:04 +09:00
|
|
|
dev_err(mbox->dev, "Invalid argument count %d\n",
|
|
|
|
sp->args_count);
|
2018-08-03 14:29:19 +09:00
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
type = sp->args[0]; /* channel type */
|
|
|
|
idx = sp->args[1]; /* index */
|
|
|
|
chan = type * 4 + idx;
|
|
|
|
|
|
|
|
if (chan >= mbox->num_chans) {
|
2020-03-04 19:31:04 +09:00
|
|
|
dev_err(mbox->dev,
|
|
|
|
"Not supported chan number: %d. (type: %d, idx: %d)\n",
|
|
|
|
chan, type, idx);
|
2018-08-03 14:29:19 +09:00
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
return &mbox->chans[chan];
|
|
|
|
}
|
|
|
|
|
2020-03-04 19:31:04 +09:00
|
|
|
static struct mbox_chan * imx_mu_seco_xlate(struct mbox_controller *mbox,
|
|
|
|
const struct of_phandle_args *sp)
|
|
|
|
{
|
|
|
|
u32 type;
|
|
|
|
|
|
|
|
if (sp->args_count < 1) {
|
|
|
|
dev_err(mbox->dev, "Invalid argument count %d\n",
|
|
|
|
sp->args_count);
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
type = sp->args[0]; /* channel type */
|
|
|
|
|
|
|
|
/* Only supports TXDB and RXDB */
|
|
|
|
if (type == IMX_MU_TYPE_TX || type == IMX_MU_TYPE_RX) {
|
|
|
|
dev_err(mbox->dev, "Invalid type: %d\n", type);
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
return imx_mu_xlate(mbox, sp);
|
|
|
|
}
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
static void imx_mu_init_generic(struct imx_mu_priv *priv)
|
|
|
|
{
|
2020-03-04 14:49:35 +09:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < IMX_MU_CHANS; i++) {
|
|
|
|
struct imx_mu_con_priv *cp = &priv->con_priv[i];
|
|
|
|
|
|
|
|
cp->idx = i % 4;
|
|
|
|
cp->type = i >> 2;
|
|
|
|
cp->chan = &priv->mbox_chans[i];
|
|
|
|
priv->mbox_chans[i].con_priv = cp;
|
|
|
|
snprintf(cp->irq_desc, sizeof(cp->irq_desc),
|
|
|
|
"imx_mu_chan[%i-%i]", cp->type, cp->idx);
|
|
|
|
}
|
|
|
|
|
|
|
|
priv->mbox.num_chans = IMX_MU_CHANS;
|
|
|
|
priv->mbox.of_xlate = imx_mu_xlate;
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
if (priv->side_b)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Set default MU configuration */
|
2019-07-29 10:54:10 +09:00
|
|
|
imx_mu_write(priv, 0, priv->dcfg->xCR);
|
2018-08-03 14:29:19 +09:00
|
|
|
}
|
|
|
|
|
2020-03-04 19:31:04 +09:00
|
|
|
/* SECO variant: generic setup, then restrict xlate to doorbell types. */
static void imx_mu_seco_init(struct imx_mu_priv *priv)
{
	imx_mu_init_generic(priv);
	priv->mbox.of_xlate = imx_mu_seco_xlate;
}
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
static int imx_mu_probe(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct device *dev = &pdev->dev;
|
|
|
|
struct device_node *np = dev->of_node;
|
|
|
|
struct imx_mu_priv *priv;
|
2019-07-29 10:54:10 +09:00
|
|
|
const struct imx_mu_dcfg *dcfg;
|
2018-08-03 14:29:19 +09:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
|
|
|
if (!priv)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
priv->dev = dev;
|
|
|
|
|
2019-04-01 14:15:24 +09:00
|
|
|
priv->base = devm_platform_ioremap_resource(pdev, 0);
|
2018-08-03 14:29:19 +09:00
|
|
|
if (IS_ERR(priv->base))
|
|
|
|
return PTR_ERR(priv->base);
|
|
|
|
|
|
|
|
priv->irq = platform_get_irq(pdev, 0);
|
|
|
|
if (priv->irq < 0)
|
|
|
|
return priv->irq;
|
|
|
|
|
2019-07-29 10:54:10 +09:00
|
|
|
dcfg = of_device_get_match_data(dev);
|
|
|
|
if (!dcfg)
|
|
|
|
return -EINVAL;
|
|
|
|
priv->dcfg = dcfg;
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
priv->clk = devm_clk_get(dev, NULL);
|
|
|
|
if (IS_ERR(priv->clk)) {
|
|
|
|
if (PTR_ERR(priv->clk) != -ENOENT)
|
|
|
|
return PTR_ERR(priv->clk);
|
|
|
|
|
|
|
|
priv->clk = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = clk_prepare_enable(priv->clk);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(dev, "Failed to enable clock\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
|
|
|
|
|
2020-03-04 14:49:35 +09:00
|
|
|
priv->dcfg->init(priv);
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
spin_lock_init(&priv->xcr_lock);
|
|
|
|
|
|
|
|
priv->mbox.dev = dev;
|
|
|
|
priv->mbox.ops = &imx_mu_ops;
|
|
|
|
priv->mbox.chans = priv->mbox_chans;
|
|
|
|
priv->mbox.txdone_irq = true;
|
|
|
|
|
|
|
|
platform_set_drvdata(pdev, priv);
|
|
|
|
|
MLK-23780 mailbox: imx: Support runtime PM
Some power hungry sub-systems like VPU has its own MUs which also
use mailbox driver, current mailbox driver uses platform driver
model and MU's power will be ON after driver probed and left ON
there, it may cause the whole sub-system can NOT enter lower power
mode, take VPU driver for example, it has runtime PM support, but
due to its MU always ON, the VPU sub-system will be always ON and
consume many power during kernel idle.
To save power in kernel idle, mailbox driver needs to support
runtime PM in order to power off MU when it is unused. However,
the runtime suspend/resume can ONLY be implemented in mailbox's
.shutdown/.startup callback, so its consumer needs to call
mbox_request_channel()/mbox_free_channel() in consumer driver's
runtime PM callback, then the MU's power will be ON/OFF along with
consumer's runtime PM status.
For those consumers never call mbox_free_channel(), MU's power will
be still ON always, if there is obvious power increase observed, we
will request the consumer to free mailbox channel to save power.
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
Reviewed-by: Jacky Bai <ping.bai@nxp.com>
2020-04-13 15:52:41 +09:00
|
|
|
ret = devm_mbox_controller_register(dev, &priv->mbox);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
pm_runtime_enable(dev);
|
|
|
|
|
|
|
|
ret = pm_runtime_get_sync(dev);
|
|
|
|
if (ret < 0) {
|
|
|
|
pm_runtime_put_noidle(dev);
|
|
|
|
goto disable_runtime_pm;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = pm_runtime_put_sync(dev);
|
|
|
|
if (ret < 0)
|
|
|
|
goto disable_runtime_pm;
|
|
|
|
|
2020-04-30 16:40:11 +09:00
|
|
|
clk_disable_unprepare(priv->clk);
|
|
|
|
|
MLK-23780 mailbox: imx: Support runtime PM
Some power hungry sub-systems like VPU has its own MUs which also
use mailbox driver, current mailbox driver uses platform driver
model and MU's power will be ON after driver probed and left ON
there, it may cause the whole sub-system can NOT enter lower power
mode, take VPU driver for example, it has runtime PM support, but
due to its MU always ON, the VPU sub-system will be always ON and
consume many power during kernel idle.
To save power in kernel idle, mailbox driver needs to support
runtime PM in order to power off MU when it is unused. However,
the runtime suspend/resume can ONLY be implemented in mailbox's
.shutdown/.startup callback, so its consumer needs to call
mbox_request_channel()/mbox_free_channel() in consumer driver's
runtime PM callback, then the MU's power will be ON/OFF along with
consumer's runtime PM status.
For those consumers never call mbox_free_channel(), MU's power will
be still ON always, if there is obvious power increase observed, we
will request the consumer to free mailbox channel to save power.
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
Reviewed-by: Jacky Bai <ping.bai@nxp.com>
2020-04-13 15:52:41 +09:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
disable_runtime_pm:
|
|
|
|
pm_runtime_disable(dev);
|
|
|
|
return ret;
|
2018-08-03 14:29:19 +09:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Remove: balance the pm_runtime_enable() done at probe time. */
static int imx_mu_remove(struct platform_device *pdev)
{
	struct imx_mu_priv *priv = platform_get_drvdata(pdev);

	pm_runtime_disable(priv->dev);

	return 0;
}
|
|
|
|
|
2019-03-06 18:25:00 +09:00
|
|
|
/*
 * noirq suspend: save the control register, but only for MUs without
 * an assigned clock (the SCU IPC case). MUs that have a clock use
 * runtime PM through their consumer and must not be touched here —
 * runtime PM is disabled in the noirq phase, so clock operations could
 * fail (see the MLK-23952 rationale in this file's history).
 */
static int imx_mu_suspend_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	if (!priv->clk)
		priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);

	return 0;
}
|
|
|
|
|
|
|
|
static int imx_mu_resume_noirq(struct device *dev)
|
|
|
|
{
|
|
|
|
struct imx_mu_priv *priv = dev_get_drvdata(dev);
|
|
|
|
|
MLK-23835 mailbox: imx: Only restore MU settings when context lost
During noirq suspend/resume, if MU context is NOT lost, such
as freeze mode suspend, when resume, there could be 2 CPUs
calling IPC, 1 CPU is in charge of handling wakeup event, the
other CPU is busy with device resume flow, the MU TIE could
be set during IPC called by the CPU handling wakeup event,
then the noirq resume callback in mailbox will be called by
the CPU executing device resume, it could overwrite the MU
settings and clear TIE by mistake, then cause the TX never
finish and IPC mutex lock never released, and system will
freeze, all CPUs are in idle and never wake up.
To avoid this issue, we should ONLY restore the MU settings
when its context is lost.
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
Reported-by: Clark Wang <xiaoning.wang@nxp.com>
Reviewed-by: Jacky Bai <ping.bai@nxp.com>
2020-04-23 17:52:40 +09:00
|
|
|
/*
|
|
|
|
* ONLY restore MU when context lost, the TIE could
|
|
|
|
* be set during noirq resume as there is MU data
|
|
|
|
* communication going on, and restore the saved
|
|
|
|
* value will overwrite the TIE and cause MU data
|
|
|
|
* send failed, may lead to system freeze. This issue
|
|
|
|
* is observed by testing freeze mode suspend.
|
|
|
|
*/
|
MLK-23952 mailbox: imx: ONLY save/restore MU context for SCU platforms
On i.MX8MP, the MU clock is combined with power domain and runtime
PM is enabled, during noirq suspend/resume phase, runtime PM is
disabled by device suspend, calling clock prepare will trigger
runtime resume failure and lead to system suspend failed.
There is no good way to handle such MU clocks combined with runtime
PM in noirq suspend phase, actually, the MU context save/restore is
ONLY necessary for SCU IPC MU, other MUs especially on i.MX8MP platforms
which have clocks assigned, they need to runtime request/free mailbox
channel in the consumer driver, so no need to save/restore MU context
for them, hence it can avoid this issue.
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
Reviewed-by: Jacky Bai <ping.bai@nxp.com>
2020-05-09 12:04:58 +09:00
|
|
|
if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
|
MLK-23835 mailbox: imx: Only restore MU settings when context lost
During noirq suspend/resume, if MU context is NOT lost, such
as freeze mode suspend, when resume, there could be 2 CPUs
calling IPC, 1 CPU is in charge of handling wakeup event, the
other CPU is busy with device resume flow, the MU TIE could
be set during IPC called by the CPU handling wakeup event,
then the noirq resume callback in mailbox will be called by
the CPU executing device resume, it could overwrite the MU
settings and clear TIE by mistake, then cause the TX never
finish and IPC mutex lock never released, and system will
freeze, all CPUs are in idle and never wake up.
To avoid this issue, we should ONLY restore the MU settings
when its context is lost.
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
Reported-by: Clark Wang <xiaoning.wang@nxp.com>
Reviewed-by: Jacky Bai <ping.bai@nxp.com>
2020-04-23 17:52:40 +09:00
|
|
|
imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
|
2019-03-06 18:25:00 +09:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-04-30 16:40:11 +09:00
|
|
|
static int imx_mu_runtime_suspend(struct device *dev)
|
|
|
|
{
|
|
|
|
struct imx_mu_priv *priv = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
clk_disable_unprepare(priv->clk);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int imx_mu_runtime_resume(struct device *dev)
|
|
|
|
{
|
|
|
|
struct imx_mu_priv *priv = dev_get_drvdata(dev);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = clk_prepare_enable(priv->clk);
|
|
|
|
if (ret)
|
|
|
|
dev_err(dev, "failed to enable clock\n");
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-03-06 18:25:00 +09:00
|
|
|
/*
 * PM callbacks: the noirq system-sleep pair saves/restores the MU
 * control register (clock-less SCU MUs only); the runtime pair gates
 * the optional MU clock as consumers request/free their channel.
 */
static const struct dev_pm_ops imx_mu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
				      imx_mu_resume_noirq)
	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
			   imx_mu_runtime_resume, NULL)
};
|
|
|
|
|
2020-03-04 14:49:35 +09:00
|
|
|
/*
 * i.MX6SX MU: generic transport ops with the side-A register map
 * (TR0 at offset 0x0, SR at 0x20, CR at 0x24).
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.tx = imx_mu_generic_tx,
	.rx = imx_mu_generic_rx,
	.rxdb = imx_mu_generic_rxdb,
	.init = imx_mu_init_generic,
	.xTR = {0x0, 0x4, 0x8, 0xc},	/* transmit registers */
	.xRR = {0x10, 0x14, 0x18, 0x1c},	/* receive registers */
	.xSR = 0x20,	/* status register */
	.xCR = 0x24,	/* control register */
};
|
|
|
|
|
|
|
|
/*
 * i.MX7ULP MU: same generic transport ops as i.MX6SX but with the
 * shifted register map (TR0 at 0x20, SR at 0x60, CR at 0x64).
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.tx = imx_mu_generic_tx,
	.rx = imx_mu_generic_rx,
	.rxdb = imx_mu_generic_rxdb,
	.init = imx_mu_init_generic,
	.xTR = {0x20, 0x24, 0x28, 0x2c},	/* transmit registers */
	.xRR = {0x40, 0x44, 0x48, 0x4c},	/* receive registers */
	.xSR = 0x60,	/* status register */
	.xCR = 0x64,	/* control register */
};
|
|
|
|
|
2020-03-04 19:31:04 +09:00
|
|
|
/*
 * i.MX8 SECO MU: dedicated SECO transport ops. Note there is no .rx
 * callback — SECO replies arrive via the doorbell path (.rxdb) only.
 * Register map matches the i.MX6SX side-A layout.
 */
static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
	.tx = imx_mu_seco_tx,
	.rxdb = imx_mu_seco_rxdb,
	.init = imx_mu_seco_init,
	.xTR = {0x0, 0x4, 0x8, 0xc},	/* transmit registers */
	.xRR = {0x10, 0x14, 0x18, 0x1c},	/* receive registers */
	.xSR = 0x20,	/* status register */
	.xCR = 0x24,	/* control register */
};
|
|
|
|
|
2018-08-03 14:29:19 +09:00
|
|
|
/* Device-tree match table: selects the per-SoC register layout/ops. */
static const struct of_device_id imx_mu_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
	{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
	{ .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
	{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
|
|
|
|
|
|
|
|
/* Platform driver glue; PM ops cover both system sleep and runtime PM. */
static struct platform_driver imx_mu_driver = {
	.probe = imx_mu_probe,
	.remove = imx_mu_remove,
	.driver = {
		.name = "imx_mu",
		.of_match_table = imx_mu_dt_ids,
		.pm = &imx_mu_pm_ops,
	},
};
|
2019-08-07 19:13:40 +09:00
|
|
|
/*
 * Register the MU platform driver.
 *
 * Registered at arch_initcall level (rather than module_init) so the
 * mailbox is available early for consumers such as the SCU IPC layer.
 */
static int __init imx_mu_init(void)
{
	int ret = platform_driver_register(&imx_mu_driver);

	if (ret) {
		pr_err("Unable to initialize mu driver\n");
		return ret;
	}

	pr_info("imx mu driver is registered.\n");

	return 0;
}

arch_initcall(imx_mu_init);
|
2018-08-03 14:29:19 +09:00
|
|
|
|
|
|
|
/* Module metadata. */
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Message Unit driver for i.MX");
MODULE_LICENSE("GPL v2");
|