From 9a92851c33e87bbf00e2cb11f16067b87032ed98 Mon Sep 17 00:00:00 2001
From: Vignesh Raghavendra
Date: Mon, 10 May 2021 20:06:08 +0530
Subject: [PATCH] dma: ti: k3-udma: Add BCDMA and PKTDMA support

Sync BCDMA and PKTDMA support from the Linux kernel driver for the
AM64 SoC.

AM64 DMSS replaces the NAVSS UDMAP with two new DMA instances: BCDMA,
which provides block-copy channels (bchans) for mem-to-mem transfers
alongside split tchans/rchans, and PKTDMA, which provides packet-mode
channels with statically mapped flows (tflows/rflows). Channel and
flow counts are now discovered from the GCFG CAP2/CAP3/CAP4 registers
per DMA type, the TISCI resource ranges gain bchan and tflow entries,
and the rings for BCDMA/PKTDMA come from the DMSS-internal ring
accessor (k3_ringacc_dmarings_init()) instead of a separate ringacc
node.

Signed-off-by: Vignesh Raghavendra
---
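Notes:

For reference, this is roughly how a peripheral driver is expected to
consume these channels through U-Boot's DMA uclass once this driver has
probed: dma_get_by_name() resolves the channel via udma_of_xlate() and
udma_request(), which (with this patch) dispatches to
pktdma_alloc_chan_resources() on a PKTDMA instance. A minimal sketch,
assuming the standard U-Boot DMA uclass API (dma_get_by_name(),
dma_enable(), dma_prepare_rcv_buf(), dma_disable()); the channel name
"rx" and the wrapper function are illustrative, not taken from this
patch:

    #include <dma.h>

    static int example_rx_setup(struct udevice *dev, void *buf, size_t len)
    {
        struct dma dma_rx;
        int ret;

        /* Parses dmas/dma-names and requests the channel; on PKTDMA
         * this allocates the rchan plus its mapped rflow.
         */
        ret = dma_get_by_name(dev, "rx", &dma_rx);
        if (ret)
            return ret;

        /* Enables the channel (PSI-L pairing already done at request) */
        ret = dma_enable(&dma_rx);
        if (ret)
            return ret;

        /* Queue one buffer on the flow's free-descriptor ring */
        ret = dma_prepare_rcv_buf(&dma_rx, buf, len);
        if (ret)
            dma_disable(&dma_rx);

        return ret;
    }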
 drivers/dma/ti/k3-udma.c | 1003 +++++++++++++++++++++++++++++++++++---
 1 file changed, 939 insertions(+), 64 deletions(-)

diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index f3ec827897..601868d7fc 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include
 #include "k3-udma-hwdef.h"
 #include "k3-psil-priv.h"
@@ -36,15 +37,25 @@
 struct udma_chan;

+enum k3_dma_type {
+    DMA_TYPE_UDMA = 0,
+    DMA_TYPE_BCDMA,
+    DMA_TYPE_PKTDMA,
+};
+
 enum udma_mmr {
     MMR_GCFG = 0,
+    MMR_BCHANRT,
     MMR_RCHANRT,
     MMR_TCHANRT,
     MMR_LAST,
 };

 static const char * const mmr_names[] = {
-    "gcfg", "rchanrt", "tchanrt"
+    [MMR_GCFG] = "gcfg",
+    [MMR_BCHANRT] = "bchanrt",
+    [MMR_RCHANRT] = "rchanrt",
+    [MMR_TCHANRT] = "tchanrt",
 };

 struct udma_tchan {
@@ -53,6 +64,16 @@ struct udma_tchan {
     void __iomem *reg_rt;

     int id;
     struct k3_nav_ring *t_ring; /* Transmit ring */
     struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
+    int tflow_id; /* applicable only for PKTDMA */
+
+};
+
+#define udma_bchan udma_tchan
+
+struct udma_rflow {
+    int id;
+    struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
+    struct k3_nav_ring *r_ring; /* Receive ring */
 };

 struct udma_rchan {
@@ -61,32 +82,45 @@ struct udma_rchan {
     void __iomem *reg_rt;

     int id;
 };

+struct udma_oes_offsets {
+    /* K3 UDMA Output Event Offset */
+    u32 udma_rchan;
+
+    /* BCDMA Output Event Offsets */
+    u32 bcdma_bchan_data;
+    u32 bcdma_bchan_ring;
+    u32 bcdma_tchan_data;
+    u32 bcdma_tchan_ring;
+    u32 bcdma_rchan_data;
+    u32 bcdma_rchan_ring;
+
+    /* PKTDMA Output Event Offsets */
+    u32 pktdma_tchan_flow;
+    u32 pktdma_rchan_flow;
+};
+
 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
 #define UDMA_FLAG_PDMA_BURST BIT(1)
 #define UDMA_FLAG_TDTYPE BIT(2)

 struct udma_match_data {
+    enum k3_dma_type type;
     u32 psil_base;
     bool enable_memcpy_support;
     u32 flags;
     u32 statictr_z_mask;
-    u32 rchan_oes_offset;
+    struct udma_oes_offsets oes;

     u8 tpl_levels;
     u32 level_start_idx[];
 };

-struct udma_rflow {
-    int id;
-
-    struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
-    struct k3_nav_ring *r_ring; /* Receive ring*/
-};
-
 enum udma_rm_range {
-    RM_RANGE_TCHAN = 0,
+    RM_RANGE_BCHAN = 0,
+    RM_RANGE_TCHAN,
     RM_RANGE_RCHAN,
     RM_RANGE_RFLOW,
+    RM_RANGE_TFLOW,
     RM_RANGE_LAST,
 };

@@ -111,15 +145,21 @@
     u32 features;

+    int bchan_cnt;
     int tchan_cnt;
     int echan_cnt;
     int rchan_cnt;
     int rflow_cnt;
+    int tflow_cnt;
+    unsigned long *bchan_map;
     unsigned long *tchan_map;
     unsigned long *rchan_map;
     unsigned long *rflow_map;
     unsigned long *rflow_map_reserved;
+    unsigned long *rflow_in_use;
+    unsigned long *tflow_map;
+    struct udma_bchan *bchans;
     struct udma_tchan *tchans;
     struct udma_rchan *rchans;
     struct udma_rflow *rflows;
@@ -143,6 +183,11 @@
     enum psil_endpoint_type ep_type;
     enum udma_tp_level channel_tpl; /* Channel Throughput Level */

+    /* PKTDMA mapped channel */
+    int mapped_channel_id;
+    /* PKTDMA default tflow or rflow for mapped channel */
+    int default_flow_id;
+
     enum dma_direction dir;

     unsigned int pkt_mode:1; /* TR or packet */
@@ -156,6 +201,7 @@
     struct udma_dev *ud;
     char name[20];

+    struct udma_bchan *bchan;
     struct udma_tchan *tchan;
     struct udma_rchan *rchan;
     struct udma_rflow *rflow;
@@ -289,6 +335,14 @@ static inline char *udma_get_dir_text(enum dma_direction dir)
     return "invalid";
 }

+static void udma_reset_uchan(struct udma_chan *uc)
+{
+    memset(&uc->config, 0, sizeof(uc->config));
+    uc->config.remote_thread_id = -1;
+    uc->config.mapped_channel_id = -1;
+    uc->config.default_flow_id = -1;
+}
+
 static inline bool udma_is_chan_running(struct udma_chan *uc)
 {
     u32 trt_ctl = 0;
@@ -371,7 +425,7 @@
     }

     if (ring1)
-        k3_nav_ringacc_ring_reset_dma(ring1, 0);
+        k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
     if (ring2)
         k3_nav_ringacc_ring_reset(ring2);
 }
@@ -390,8 +444,10 @@
         val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
         udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

-        val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
-        udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
+        if (!uc->bchan) {
+            val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
+            udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
+        }
     }

     if (uc->rchan) {
@@ -640,10 +696,32 @@
         return 0;
     }

-    uc->tchan = __udma_reserve_tchan(ud, -1);
+    uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
     if (IS_ERR(uc->tchan))
         return PTR_ERR(uc->tchan);

+    if (ud->tflow_cnt) {
+        int tflow_id;
+
+        /* Only PKTDMA has support for tx flows */
+        if (uc->config.default_flow_id >= 0)
+            tflow_id = uc->config.default_flow_id;
+        else
+            tflow_id = uc->tchan->id;
+
+        if (test_bit(tflow_id, ud->tflow_map)) {
+            dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
+            __clear_bit(uc->tchan->id, ud->tchan_map);
+            uc->tchan = NULL;
+            return -ENOENT;
+        }
+
+        uc->tchan->tflow_id = tflow_id;
+        __set_bit(tflow_id, ud->tflow_map);
+    } else {
+        uc->tchan->tflow_id = -1;
+    }
+
     pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

     return 0;
@@ -659,7 +737,7 @@
         return 0;
     }

-    uc->rchan = __udma_reserve_rchan(ud, -1);
+    uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
     if (IS_ERR(uc->rchan))
         return PTR_ERR(uc->rchan);

@@ -751,6 +829,8 @@
         dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
             uc->tchan->id);
         __clear_bit(uc->tchan->id, ud->tchan_map);
+        if (uc->tchan->tflow_id >= 0)
+            __clear_bit(uc->tchan->tflow_id, ud->tflow_map);
         uc->tchan = NULL;
     }
 }
@@ -855,15 +935,24 @@
     if (uc->config.dir == DMA_MEM_TO_MEM)
         return 0;

-    ret = udma_get_rflow(uc, uc->rchan->id);
+    if (uc->config.default_flow_id >= 0)
+        ret = udma_get_rflow(uc, uc->config.default_flow_id);
+    else
+        ret = udma_get_rflow(uc, uc->rchan->id);
+
     if (ret) {
         ret = -EBUSY;
         goto err_rflow;
     }

-    fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
-
     rflow = uc->rflow;
+    if (ud->tflow_cnt) {
+        fd_ring_id = ud->tflow_cnt + rflow->id;
+    } else {
+        fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
+                 uc->rchan->id;
+    }
+
     ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
                         &rflow->fd_ring, &rflow->r_ring);
     if (ret) {
@@ -950,9 +1039,7 @@
     req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
                TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
-               TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
-               TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
-               TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+               TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
     req.nav_id = tisci_rm->tisci_dev_id;
     req.index = uc->rchan->id;
     req.rx_chan_type = mode;
@@ -965,9 +1052,13 @@
                               0) >> 2;
         req.rxcq_qnum = rx_ring;
     }
-    if (uc->rflow->id != uc->rchan->id && uc->config.dir != DMA_MEM_TO_MEM) {
+    if (ud->match_data->type == DMA_TYPE_UDMA &&
+        uc->rflow->id != uc->rchan->id &&
+        uc->config.dir != DMA_MEM_TO_MEM) {
         req.flowid_start = uc->rflow->id;
         req.flowid_cnt = 1;
+        req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+                    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
     }

     ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
@@ -1150,12 +1241,58 @@ static void udma_free_chan_resources(struct udma_chan *uc)
     uc->config.dir = DMA_MEM_TO_MEM;
 }

+static const char * const range_names[] = {
+    [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
+    [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
+    [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
+    [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
+    [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
+};
+
 static int udma_get_mmrs(struct udevice *dev)
 {
     struct udma_dev *ud = dev_get_priv(dev);
+    u32 cap2, cap3, cap4;
     int i;

-    for (i = 0; i < MMR_LAST; i++) {
+    ud->mmrs[MMR_GCFG] = (uint32_t *)devfdt_get_addr_name(dev, mmr_names[MMR_GCFG]);
+    if (!ud->mmrs[MMR_GCFG])
+        return -EINVAL;
+
+    cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+    cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+    switch (ud->match_data->type) {
+    case DMA_TYPE_UDMA:
+        ud->rflow_cnt = cap3 & 0x3fff;
+        ud->tchan_cnt = cap2 & 0x1ff;
+        ud->echan_cnt = (cap2 >> 9) & 0x1ff;
+        ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+        break;
+    case DMA_TYPE_BCDMA:
+        ud->bchan_cnt = cap2 & 0x1ff;
+        ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
+        ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+        break;
+    case DMA_TYPE_PKTDMA:
+        cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+        ud->tchan_cnt = cap2 & 0x1ff;
+        ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
+        ud->rflow_cnt = cap3 & 0x3fff;
+        ud->tflow_cnt = cap4 & 0x3fff;
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    for (i = 1; i < MMR_LAST; i++) {
+        if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
+            continue;
+        if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
+            continue;
+        if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
+            continue;
+
         ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
                                    mmr_names[i]);
         if (!ud->mmrs[i])
@@ -1168,23 +1305,10 @@
 static int udma_setup_resources(struct udma_dev *ud)
 {
     struct udevice *dev = ud->dev;
-    int ch_count, i;
-    u32 cap2, cap3;
+    int i;
     struct ti_sci_resource_desc *rm_desc;
     struct ti_sci_resource *rm_res;
     struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
-    static const char * const range_names[] = { "ti,sci-rm-range-tchan",
-                            "ti,sci-rm-range-rchan",
-                            "ti,sci-rm-range-rflow" };
-
-    cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
-    cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
-
-    ud->rflow_cnt = cap3 & 0x3fff;
-    ud->tchan_cnt = cap2 & 0x1ff;
-    ud->echan_cnt = (cap2 >> 9) & 0x1ff;
-    ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
-    ch_count = ud->tchan_cnt + ud->rchan_cnt;

     ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
                        sizeof(unsigned long), GFP_KERNEL);
@@ -1215,11 +1339,15 @@
     bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

     /* Get resource ranges from tisci */
-    for (i = 0; i < RM_RANGE_LAST; i++)
+    for (i = 0; i < RM_RANGE_LAST; i++) {
+        if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
+            continue;
+
         tisci_rm->rm_ranges[i] =
             devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
                             tisci_rm->tisci_dev_id,
                             (char *)range_names[i]);
+    }

     /* tchan ranges */
     rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
@@ -1266,6 +1394,229 @@
         }
     }

+    return 0;
+}
+
+static int bcdma_setup_resources(struct udma_dev *ud)
+{
+    int i;
+    struct udevice *dev = ud->dev;
+    struct ti_sci_resource_desc *rm_desc;
+    struct ti_sci_resource *rm_res;
+    struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+    ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
+                       sizeof(unsigned long), GFP_KERNEL);
+    ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
+                  GFP_KERNEL);
+    ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+                       sizeof(unsigned long), GFP_KERNEL);
+    ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+                  GFP_KERNEL);
+    ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+                       sizeof(unsigned long), GFP_KERNEL);
+    ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+                  GFP_KERNEL);
+    /* BCDMA does not really have flows, but the driver expects them */
+    ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
+                    sizeof(unsigned long),
+                    GFP_KERNEL);
+    ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
+                  GFP_KERNEL);
+
+    if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
+        !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
+        !ud->rflows)
+        return -ENOMEM;
+
+    /* Get resource ranges from tisci */
+    for (i = 0; i < RM_RANGE_LAST; i++) {
+        if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
+            continue;
+
+        tisci_rm->rm_ranges[i] =
+            devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+                            tisci_rm->tisci_dev_id,
+                            (char *)range_names[i]);
+    }
+
+    /* bchan ranges */
+    rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
+    if (IS_ERR(rm_res)) {
+        bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+    } else {
+        bitmap_fill(ud->bchan_map, ud->bchan_cnt);
+        for (i = 0; i < rm_res->sets; i++) {
+            rm_desc = &rm_res->desc[i];
+            bitmap_clear(ud->bchan_map, rm_desc->start,
+                     rm_desc->num);
+            dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
+                rm_desc->start, rm_desc->num);
+        }
+    }
+
+    /* tchan ranges */
+    rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+    if (IS_ERR(rm_res)) {
+        bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+    } else {
+        bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+        for (i = 0; i < rm_res->sets; i++) {
+            rm_desc = &rm_res->desc[i];
+            bitmap_clear(ud->tchan_map, rm_desc->start,
+                     rm_desc->num);
+            dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+                rm_desc->start, rm_desc->num);
+        }
+    }
+
+    /* rchan ranges */
+    rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+    if (IS_ERR(rm_res)) {
+        bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+    } else {
+        bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+        for (i = 0; i < rm_res->sets; i++) {
+            rm_desc = &rm_res->desc[i];
+            bitmap_clear(ud->rchan_map, rm_desc->start,
+                     rm_desc->num);
+            dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+                rm_desc->start, rm_desc->num);
+        }
+    }
+
+    return 0;
+}
+
+static int pktdma_setup_resources(struct udma_dev *ud)
+{
+    int i;
+    struct udevice *dev = ud->dev;
+    struct ti_sci_resource *rm_res;
+    struct ti_sci_resource_desc *rm_desc;
+    struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+
+    ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+                       sizeof(unsigned long), GFP_KERNEL);
+    ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+                  GFP_KERNEL);
+    ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+                       sizeof(unsigned long),
+                       GFP_KERNEL);
+    ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+                  GFP_KERNEL);
+    ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+                    sizeof(unsigned long),
+                    GFP_KERNEL);
+    ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+                  GFP_KERNEL);
+    ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
+                       sizeof(unsigned long), GFP_KERNEL);
+
+    if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
+        !ud->rchans || !ud->rflows || !ud->rflow_in_use)
+        return -ENOMEM;
+
+    /* Get resource ranges from tisci */
+    for (i = 0; i < RM_RANGE_LAST; i++) {
+        if (i == RM_RANGE_BCHAN)
+            continue;
+
+        tisci_rm->rm_ranges[i] =
+            devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+                            tisci_rm->tisci_dev_id,
+                            (char *)range_names[i]);
+    }
+
+    /* tchan ranges */
+    rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+    if (IS_ERR(rm_res)) {
+        bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+    } else {
+        bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+        for (i = 0; i < rm_res->sets; i++) {
+            rm_desc = &rm_res->desc[i];
+            bitmap_clear(ud->tchan_map, rm_desc->start,
+                     rm_desc->num);
+            dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
+                rm_desc->start, rm_desc->num);
+        }
+    }
+
+    /* rchan ranges */
+    rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+    if (IS_ERR(rm_res)) {
+        bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+    } else {
+        bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+        for (i = 0; i < rm_res->sets; i++) {
+            rm_desc = &rm_res->desc[i];
+            bitmap_clear(ud->rchan_map, rm_desc->start,
+                     rm_desc->num);
+            dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
+                rm_desc->start, rm_desc->num);
+        }
+    }
+
+    /* rflow ranges */
+    rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+    if (IS_ERR(rm_res)) {
+        /* all rflows are assigned exclusively to Linux */
+        bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+    } else {
+        bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
+        for (i = 0; i < rm_res->sets; i++) {
+            rm_desc = &rm_res->desc[i];
+            bitmap_clear(ud->rflow_in_use, rm_desc->start,
+                     rm_desc->num);
+            dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
+                rm_desc->start, rm_desc->num);
+        }
+    }
+
+    /* tflow ranges */
+    rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
+    if (IS_ERR(rm_res)) {
+        /* all tflows are assigned exclusively to Linux */
+        bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+    } else {
+        bitmap_fill(ud->tflow_map, ud->tflow_cnt);
+        for (i = 0; i < rm_res->sets; i++) {
+            rm_desc = &rm_res->desc[i];
+            bitmap_clear(ud->tflow_map, rm_desc->start,
+                     rm_desc->num);
+            dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
+                rm_desc->start, rm_desc->num);
+        }
+    }
+
+    return 0;
+}
+
+static int setup_resources(struct udma_dev *ud)
+{
+    struct udevice *dev = ud->dev;
+    int ch_count, ret;
+
+    switch (ud->match_data->type) {
+    case DMA_TYPE_UDMA:
+        ret = udma_setup_resources(ud);
+        break;
+    case DMA_TYPE_BCDMA:
+        ret = bcdma_setup_resources(ud);
+        break;
+    case DMA_TYPE_PKTDMA:
+        ret = pktdma_setup_resources(ud);
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    if (ret)
+        return ret;
+
+    ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
+    if (ud->bchan_cnt)
+        ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
     ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
     ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
     if (!ch_count)
@@ -1276,13 +1627,45 @@ static int udma_setup_resources(struct udma_dev *ud)
     if (!ud->channels)
         return -ENOMEM;

-    dev_info(dev,
-         "Channels: %d (tchan: %u, echan: %u, rchan: %u, rflow: %u)\n",
-         ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
-         ud->rflow_cnt);
+    switch (ud->match_data->type) {
+    case DMA_TYPE_UDMA:
+        dev_dbg(dev,
+            "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+            ch_count,
+            ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+                              ud->tchan_cnt),
+            ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+                              ud->rchan_cnt),
+            ud->rflow_cnt - bitmap_weight(ud->rflow_map,
+                              ud->rflow_cnt));
+        break;
+    case DMA_TYPE_BCDMA:
+        dev_dbg(dev,
+            "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
+            ch_count,
+            ud->bchan_cnt - bitmap_weight(ud->bchan_map,
+                              ud->bchan_cnt),
+            ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+                              ud->tchan_cnt),
+            ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+                              ud->rchan_cnt));
+        break;
+    case DMA_TYPE_PKTDMA:
+        dev_dbg(dev,
+            "Channels: %d (tchan: %u, rchan: %u)\n",
+            ch_count,
+            ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+                              ud->tchan_cnt),
+            ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+                              ud->rchan_cnt));
+        break;
+    default:
+        break;
+    }

     return ch_count;
 }
+
 static int udma_probe(struct udevice *dev)
 {
     struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
@@ -1294,17 +1677,11 @@ static int udma_probe(struct udevice *dev)

     ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));

+    ud->match_data = (void *)dev_get_driver_data(dev);
     ret = udma_get_mmrs(dev);
     if (ret)
         return ret;

-    ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
-                       "ti,ringacc", &tmp);
-    ud->ringacc = dev_get_priv(tmp);
-    if (IS_ERR(ud->ringacc))
-        return PTR_ERR(ud->ringacc);
-
-    ud->match_data = (void *)dev_get_driver_data(dev);
     ud->psil_base = ud->match_data->psil_base;

     ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
@@ -1335,16 +1712,40 @@ static int udma_probe(struct udevice *dev)
     tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
     tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;

+    if (ud->match_data->type == DMA_TYPE_UDMA) {
+        ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
+                           "ti,ringacc", &tmp);
+        ud->ringacc = dev_get_priv(tmp);
+    } else {
+        struct k3_ringacc_init_data ring_init_data;
+
+        ring_init_data.tisci = ud->tisci_rm.tisci;
+        ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
+        if (ud->match_data->type == DMA_TYPE_BCDMA) {
+            ring_init_data.num_rings = ud->bchan_cnt +
+                           ud->tchan_cnt +
+                           ud->rchan_cnt;
+        } else {
+            ring_init_data.num_rings = ud->rflow_cnt +
+                           ud->tflow_cnt;
+        }
+
+        ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
+    }
+    if (IS_ERR(ud->ringacc))
+        return PTR_ERR(ud->ringacc);
+
     ud->dev = dev;
-    ud->ch_count = udma_setup_resources(ud);
+    ud->ch_count = setup_resources(ud);
     if (ud->ch_count <= 0)
         return ud->ch_count;

-    dev_info(dev,
-         "Number of channels: %u (tchan: %u, echan: %u, rchan: %u dev-id %u)\n",
-         ud->ch_count, ud->tchan_cnt, ud->echan_cnt, ud->rchan_cnt,
-         tisci_rm->tisci_dev_id);
-    dev_info(dev, "Number of rflows: %u\n", ud->rflow_cnt);
+    for (i = 0; i < ud->bchan_cnt; i++) {
+        struct udma_bchan *bchan = &ud->bchans[i];
+
+        bchan->id = i;
+        bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
+    }

     for (i = 0; i < ud->tchan_cnt; i++) {
         struct udma_tchan *tchan = &ud->tchans[i];
@@ -1372,15 +1773,19 @@ static int udma_probe(struct udevice *dev)
         uc->ud = ud;
         uc->id = i;
         uc->config.remote_thread_id = -1;
+        uc->bchan = NULL;
         uc->tchan = NULL;
         uc->rchan = NULL;
+        uc->config.mapped_channel_id = -1;
+        uc->config.default_flow_id = -1;
         uc->config.dir = DMA_MEM_TO_MEM;
         sprintf(uc->name, "UDMA chan%d\n", i);
         if (!i)
             uc->in_use = true;
     }

-    pr_debug("UDMA(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+    pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+         dev->name,
          udma_read(ud->mmrs[MMR_GCFG], 0),
          udma_read(ud->mmrs[MMR_GCFG], 0x20),
          udma_read(ud->mmrs[MMR_GCFG], 0x24),
@@ -1496,6 +1901,379 @@ static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
     return 0;
 }

+#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
+
+#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
+
+#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
+
+#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
+
+#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
+
+static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+    struct udma_dev *ud = uc->ud;
+    struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+    const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+    struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+    struct udma_bchan *bchan = uc->bchan;
+    int ret = 0;
+
+    req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
+    req_tx.nav_id = tisci_rm->tisci_dev_id;
+    req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
+    req_tx.index = bchan->id;
+
+    ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+    if (ret)
+        dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
+
+    return ret;
+}
+
+static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
+{
+    if (id >= 0) {
+        if (test_bit(id, ud->bchan_map)) {
+            dev_err(ud->dev, "bchan%d is in use\n", id);
+            return ERR_PTR(-ENOENT);
+        }
+    } else {
+        id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
+        if (id == ud->bchan_cnt)
+            return ERR_PTR(-ENOENT);
+    }
+    __set_bit(id, ud->bchan_map);
+    return &ud->bchans[id];
+}
+
+static int bcdma_get_bchan(struct udma_chan *uc)
+{
+    struct udma_dev *ud = uc->ud;
+
+    if (uc->bchan) {
+        dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
+            uc->id, uc->bchan->id);
+        return 0;
+    }
+
+    uc->bchan = __bcdma_reserve_bchan(ud, -1);
+    if (IS_ERR(uc->bchan))
+        return PTR_ERR(uc->bchan);
+
+    uc->tchan = uc->bchan;
+
+    return 0;
+}
+
+static void bcdma_put_bchan(struct udma_chan *uc)
+{
+    struct udma_dev *ud = uc->ud;
+
+    if (uc->bchan) {
+        dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
+            uc->bchan->id);
+        __clear_bit(uc->bchan->id, ud->bchan_map);
+        uc->bchan = NULL;
+        uc->tchan = NULL;
+    }
+}
+
+static void bcdma_free_bchan_resources(struct udma_chan *uc)
+{
+    if (!uc->bchan)
+        return;
+
+    k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
+    k3_nav_ringacc_ring_free(uc->bchan->t_ring);
+    uc->bchan->tc_ring = NULL;
+    uc->bchan->t_ring = NULL;
+
+    bcdma_put_bchan(uc);
+}
+
+static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
+{
+    struct k3_nav_ring_cfg ring_cfg;
+    struct udma_dev *ud = uc->ud;
+    int ret;
+
+    ret = bcdma_get_bchan(uc);
+    if (ret)
+        return ret;
+
+    ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
+                        &uc->bchan->t_ring,
+                        &uc->bchan->tc_ring);
+    if (ret) {
+        ret = -EBUSY;
+        goto err_ring;
+    }
+
+    memset(&ring_cfg, 0, sizeof(ring_cfg));
+    ring_cfg.size = 16;
+    ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
+    ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
+
+    ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
+    if (ret)
+        goto err_ringcfg;
+
+    return 0;
+
+err_ringcfg:
+    k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
+    uc->bchan->tc_ring = NULL;
+    k3_nav_ringacc_ring_free(uc->bchan->t_ring);
+    uc->bchan->t_ring = NULL;
+err_ring:
+    bcdma_put_bchan(uc);
+
+    return ret;
+}
+
+static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+    struct udma_dev *ud = uc->ud;
+    struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+    const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+    struct udma_tchan *tchan = uc->tchan;
+    struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+    int ret = 0;
+
+    req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
+    req_tx.nav_id = tisci_rm->tisci_dev_id;
+    req_tx.index = tchan->id;
+    req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+    if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+        ud->match_data->flags & UDMA_FLAG_TDTYPE) {
+        /* wait for peer to complete the teardown for PDMAs */
+        req_tx.valid_params |=
+            TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
+        req_tx.tx_tdtype = 1;
+    }
+
+    ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+    if (ret)
+        dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+    return ret;
+}
+
+#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
+
+static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+    struct udma_dev *ud = uc->ud;
+    struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+    const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+    struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+    struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+    int ret = 0;
+
+    req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
+    req_rx.nav_id = tisci_rm->tisci_dev_id;
+    req_rx.index = uc->rchan->id;
+
+    ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+    if (ret) {
+        dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
+        return ret;
+    }
+
+    flow_req.valid_params =
+        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
+
+    flow_req.nav_id = tisci_rm->tisci_dev_id;
+    flow_req.flow_index = uc->rflow->id;
+
+    if (uc->config.needs_epib)
+        flow_req.rx_einfo_present = 1;
+    else
+        flow_req.rx_einfo_present = 0;
+    if (uc->config.psd_size)
+        flow_req.rx_psinfo_present = 1;
+    else
+        flow_req.rx_psinfo_present = 0;
+    flow_req.rx_error_handling = 1;
+
+    ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+    if (ret)
+        dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
+            ret);
+
+    return ret;
+}
+
+static int bcdma_alloc_chan_resources(struct udma_chan *uc)
+{
+    int ret;
+
+    uc->config.pkt_mode = false;
+
+    switch (uc->config.dir) {
+    case DMA_MEM_TO_MEM:
+        /* Non synchronized - mem to mem type of transfer */
+        dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+            uc->id);
+
+        ret = bcdma_alloc_bchan_resources(uc);
+        if (ret)
+            return ret;
+
+        ret = bcdma_tisci_m2m_channel_config(uc);
+        break;
+    default:
+        /* Can not happen */
+        dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+            __func__, uc->id, uc->config.dir);
+        return -EINVAL;
+    }
+
+    /* check if the channel configuration was successful */
+    if (ret)
+        goto err_res_free;
+
+    if (udma_is_chan_running(uc)) {
+        dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
+        udma_stop(uc);
+        if (udma_is_chan_running(uc)) {
+            dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
+            goto err_res_free;
+        }
+    }
+
+    udma_reset_rings(uc);
+
+    return 0;
+
+err_res_free:
+    bcdma_free_bchan_resources(uc);
+    udma_free_tx_resources(uc);
+    udma_free_rx_resources(uc);
+
+    udma_reset_uchan(uc);
+
+    return ret;
+}
+
+static int pktdma_alloc_chan_resources(struct udma_chan *uc)
+{
+    struct udma_dev *ud = uc->ud;
+    int ret;
+
+    switch (uc->config.dir) {
+    case DMA_MEM_TO_DEV:
+        /* Slave transfer synchronized - mem to dev (TX) transfer */
+        dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+            uc->id);
+
+        ret = udma_alloc_tx_resources(uc);
+        if (ret) {
+            uc->config.remote_thread_id = -1;
+            return ret;
+        }
+
+        uc->config.src_thread = ud->psil_base + uc->tchan->id;
+        uc->config.dst_thread = uc->config.remote_thread_id;
+        uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+        ret = pktdma_tisci_tx_channel_config(uc);
+        break;
+    case DMA_DEV_TO_MEM:
+        /* Slave transfer synchronized - dev to mem (RX) transfer */
+        dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+            uc->id);
+
+        ret = udma_alloc_rx_resources(uc);
+        if (ret) {
+            uc->config.remote_thread_id = -1;
+            return ret;
+        }
+
+        uc->config.src_thread = uc->config.remote_thread_id;
+        uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+                    K3_PSIL_DST_THREAD_ID_OFFSET;
+
+        ret = pktdma_tisci_rx_channel_config(uc);
+        break;
+    default:
+        /* Can not happen */
+        dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+            __func__, uc->id, uc->config.dir);
+        return -EINVAL;
+    }
+
+    /* check if the channel configuration was successful */
+    if (ret)
+        goto err_res_free;
+
+    /* PSI-L pairing */
+    ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+    if (ret) {
+        dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+            uc->config.src_thread, uc->config.dst_thread);
+        goto err_res_free;
+    }
+
+    if (udma_is_chan_running(uc)) {
+        dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+        udma_stop(uc);
+        if (udma_is_chan_running(uc)) {
+            dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+            goto err_res_free;
+        }
+    }
+
+    udma_reset_rings(uc);
+
+    if (uc->tchan)
+        dev_dbg(ud->dev,
+            "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
+            uc->id, uc->tchan->id, uc->tchan->tflow_id,
+            uc->config.remote_thread_id);
+    else if (uc->rchan)
+        dev_dbg(ud->dev,
+            "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
+            uc->id, uc->rchan->id, uc->rflow->id,
+            uc->config.remote_thread_id);
+    return 0;
+
+err_res_free:
+    udma_free_tx_resources(uc);
+    udma_free_rx_resources(uc);
+
+    udma_reset_uchan(uc);
+
+    return ret;
+}
+
 static int udma_transfer(struct udevice *dev, int direction, void *dst,
              void *src, size_t len)
 {
@@ -1505,7 +2283,16 @@ static int udma_transfer(struct udevice *dev, int direction,
     dma_addr_t paddr = 0;
     int ret;

-    ret = udma_alloc_chan_resources(uc);
+    switch (ud->match_data->type) {
+    case DMA_TYPE_UDMA:
+        ret = udma_alloc_chan_resources(uc);
+        break;
+    case DMA_TYPE_BCDMA:
+        ret = bcdma_alloc_chan_resources(uc);
+        break;
+    default:
+        return -EINVAL;
+    }
     if (ret)
         return ret;

@@ -1514,7 +2301,17 @@ static int udma_transfer(struct udevice *dev, int direction,
     udma_poll_completion(uc, &paddr);

     udma_stop(uc);
-    udma_free_chan_resources(uc);
+    switch (ud->match_data->type) {
+    case DMA_TYPE_UDMA:
+        udma_free_chan_resources(uc);
+        break;
+    case DMA_TYPE_BCDMA:
+        bcdma_free_bchan_resources(uc);
+        break;
+    default:
+        return -EINVAL;
+    }
+
     return 0;
 }

@@ -1533,7 +2330,19 @@ static int udma_request(struct dma *dma)
     uc = &ud->channels[dma->id];
     ucc = &uc->config;

-    ret = udma_alloc_chan_resources(uc);
+    switch (ud->match_data->type) {
+    case DMA_TYPE_UDMA:
+        ret = udma_alloc_chan_resources(uc);
+        break;
+    case DMA_TYPE_BCDMA:
+        ret = bcdma_alloc_chan_resources(uc);
+        break;
+    case DMA_TYPE_PKTDMA:
+        ret = pktdma_alloc_chan_resources(uc);
+        break;
+    default:
+        return -EINVAL;
+    }
     if (ret) {
         dev_err(dma->dev, "alloc dma res failed %d\n", ret);
         return -EINVAL;
@@ -1573,7 +2382,14 @@ static int udma_rfree(struct dma *dma)

     if (udma_is_chan_running(uc))
         udma_stop(uc);
-    udma_free_chan_resources(uc);
+
+    udma_navss_psil_unpair(ud, uc->config.src_thread,
+                   uc->config.dst_thread);
+
+    bcdma_free_bchan_resources(uc);
+    udma_free_tx_resources(uc);
+    udma_free_rx_resources(uc);
+    udma_reset_uchan(uc);

     uc->in_use = false;

@@ -1764,6 +2580,15 @@ static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
     ucc->notdpkt = ep_config->notdpkt;
     ucc->ep_type = ep_config->ep_type;

+    if (ud->match_data->type == DMA_TYPE_PKTDMA &&
+        ep_config->mapped_channel_id >= 0) {
+        ucc->mapped_channel_id = ep_config->mapped_channel_id;
+        ucc->default_flow_id = ep_config->default_flow_id;
+    } else {
+        ucc->mapped_channel_id = -1;
+        ucc->default_flow_id = -1;
+    }
+
     ucc->needs_epib = ep_config->needs_epib;
     ucc->psd_size = ep_config->psd_size;
     ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
                  ucc->psd_size;
@@ -1859,10 +2684,13 @@ static const struct dma_ops udma_ops = {
 };

 static struct udma_match_data am654_main_data = {
+    .type = DMA_TYPE_UDMA,
     .psil_base = 0x1000,
     .enable_memcpy_support = true,
     .statictr_z_mask = GENMASK(11, 0),
-    .rchan_oes_offset = 0x200,
+    .oes = {
+        .udma_rchan = 0x200,
+    },
     .tpl_levels = 2,
     .level_start_idx = {
         [0] = 8, /* Normal channels */
@@ -1871,10 +2699,13 @@
 static struct udma_match_data am654_mcu_data = {
+    .type = DMA_TYPE_UDMA,
     .psil_base = 0x6000,
     .enable_memcpy_support = true,
     .statictr_z_mask = GENMASK(11, 0),
-    .rchan_oes_offset = 0x200,
+    .oes = {
+        .udma_rchan = 0x200,
+    },
     .tpl_levels = 2,
     .level_start_idx = {
         [0] = 2, /* Normal channels */
@@ -1883,11 +2714,14 @@
 static struct udma_match_data j721e_main_data = {
+    .type = DMA_TYPE_UDMA,
     .psil_base = 0x1000,
     .enable_memcpy_support = true,
     .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
     .statictr_z_mask = GENMASK(23, 0),
-    .rchan_oes_offset = 0x400,
+    .oes = {
+        .udma_rchan = 0x400,
+    },
     .tpl_levels = 3,
     .level_start_idx = {
         [0] = 16, /* Normal channels */
@@ -1897,11 +2731,14 @@
 static struct udma_match_data j721e_mcu_data = {
+    .type = DMA_TYPE_UDMA,
     .psil_base = 0x6000,
     .enable_memcpy_support = true,
     .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
     .statictr_z_mask = GENMASK(23, 0),
-    .rchan_oes_offset = 0x400,
+    .oes = {
+        .udma_rchan = 0x400,
+    },
     .tpl_levels = 2,
     .level_start_idx = {
         [0] = 2, /* Normal channels */
@@ -1909,6 +2746,36 @@
     },
 };

+static struct udma_match_data am64_bcdma_data = {
+    .type = DMA_TYPE_BCDMA,
+    .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
+    .enable_memcpy_support = true, /* Supported via bchan */
+    .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+    .statictr_z_mask = GENMASK(23, 0),
+    .oes = {
+        .bcdma_bchan_data = 0x2200,
+        .bcdma_bchan_ring = 0x2400,
+        .bcdma_tchan_data = 0x2800,
+        .bcdma_tchan_ring = 0x2a00,
+        .bcdma_rchan_data = 0x2e00,
+        .bcdma_rchan_ring = 0x3000,
+    },
+    /* No throughput levels */
+};
+
+static struct udma_match_data am64_pktdma_data = {
+    .type = DMA_TYPE_PKTDMA,
+    .psil_base = 0x1000,
+    .enable_memcpy_support = false,
+    .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+    .statictr_z_mask = GENMASK(23, 0),
+    .oes = {
+        .pktdma_tchan_flow = 0x1200,
+        .pktdma_rchan_flow = 0x1600,
+    },
+    /* No throughput levels */
+};
+
 static const struct udevice_id udma_ids[] = {
     {
         .compatible = "ti,am654-navss-main-udmap",
@@ -1924,6 +2791,14 @@ static const struct udevice_id udma_ids[] = {
         .compatible = "ti,j721e-navss-mcu-udmap",
         .data = (ulong)&j721e_mcu_data,
     },
+    {
+        .compatible = "ti,am64-dmss-bcdma",
+        .data = (ulong)&am64_bcdma_data,
+    },
+    {
+        .compatible = "ti,am64-dmss-pktdma",
+        .data = (ulong)&am64_pktdma_data,
+    },
     { /* Sentinel */ },
 };