u-boot-brain/drivers/net/designware.c
Joe Hershberger 1fd92db83d net: cosmetic: Fix var naming net <-> eth drivers
Update the naming convention used in the network stack functions and
variables that Ethernet drivers use to interact with it.

This cleans up the temporary hacks that were added to this interface
along with the DM support.

This patch has a few remaining checkpatch.pl failures that would be out
of the scope of this patch to fix (drivers that are in gross violation
of checkpatch.pl).

Signed-off-by: Joe Hershberger <joe.hershberger@ni.com>
Acked-by: Simon Glass <sjg@chromium.org>
2015-04-18 11:11:33 -06:00

/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * Designware ethernet IP driver for u-boot
 */

#include <common.h>
#include <miiphy.h>
#include <malloc.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <asm/io.h>
#include "designware.h"

#if !defined(CONFIG_PHYLIB)
# error "DesignWare Ether MAC requires PHYLIB - missing CONFIG_PHYLIB"
#endif
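
/*
 * MDIO read via the GMAC's MII address/data registers: the PHY and
 * register numbers are packed into miiaddr together with the MDC clock
 * range and the BUSY bit, then BUSY is polled until the controller has
 * latched the result into miidata.  Returns -1 on timeout.
 */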
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	};

	return -1;
}
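
/*
 * MDIO write: the value is placed in miidata first, then the cycle is
 * triggered through miiaddr with the WRITE and BUSY bits set, and BUSY
 * is polled until the controller completes the transfer.
 */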
static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int ret = -1, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	};

	return ret;
}
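
/*
 * Register an MDIO bus for this MAC so the PHY framework can reach the
 * attached PHY through the two accessors above; the MAC register block
 * is stashed in bus->priv for their use.
 */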
static int dw_mdio_init(char *name, struct eth_mac_regs *mac_regs_p)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -1;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	sprintf(bus->name, name);

	bus->priv = (void *)mac_regs_p;

	return mdio_register(bus);
}
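
/*
 * Build the Tx descriptor ring: each descriptor points at its own slice
 * of the txbuffs area and at the next descriptor, the last entry is
 * looped back to the first, and the whole table is flushed to memory
 * before its base address is handed to the DMA engine.
 */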
static void tx_descs_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS | \
				DESC_TXSTS_TXCHECKINSCTRL | \
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->tx_mac_descrtable,
			   (unsigned int)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}
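
/*
 * Build the Rx descriptor ring the same way, but hand ownership of
 * every descriptor to the DMA engine up front so the GMAC can start
 * filling the buffers as soon as reception is enabled.
 */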
static void rx_descs_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to GMAC we need to make sure that the
	 * zeros written there right after the "priv" structure was
	 * allocated have been flushed to RAM.  Otherwise some of them
	 * might reach RAM only after the GMAC has already started
	 * pushing received data there via DMA, corrupting the incoming
	 * frames.
	 */
	flush_dcache_range((unsigned int)rxbuffs, (unsigned int)rxbuffs +
			   RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) | \
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->rx_mac_descrtable,
			   (unsigned int)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}
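
/*
 * Program the station MAC address: bytes 0-3 go into the low register
 * and bytes 4-5 into the high register of address filter slot 0.
 */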
static int dw_write_hwaddr(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;
	u8 *mac_id = &dev->enetaddr[0];

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}
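
/*
 * Mirror the PHY's negotiated link parameters into the MAC
 * configuration register: select the MII port for anything below
 * gigabit, set the 100 Mbit/s and full-duplex bits as needed, then
 * report the resulting speed and duplex.
 */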
static void dw_adjust_link(struct eth_mac_regs *mac_p,
			   struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
}
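
/*
 * Stop the interface: disable the MAC receiver and transmitter, stop
 * the DMA Rx/Tx state machines and power down the PHY.
 */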
static void dw_eth_halt(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}
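
/*
 * Bring the interface up: soft-reset the DMA engine, re-program the MAC
 * address (the reset clears it), rebuild both descriptor rings,
 * configure the bus and operation modes, start the DMA state machines
 * and the PHY, and finally enable the MAC once the link is up.
 */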
static int dw_eth_init(struct eth_device *dev, bd_t *bis)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -1;
		}

		mdelay(100);
	};

	/* Soft reset above clears HW address registers.
	 * So we have to set it here once again */
	dw_write_hwaddr(dev);

	rx_descs_init(dev);
	tx_descs_init(dev);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	if (phy_startup(priv->phydev)) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return -1;
	}

	dw_adjust_link(mac_p, priv->phydev);

	if (!priv->phydev->link)
		return -1;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}
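
/*
 * Transmit one frame: check that the CPU still owns the current Tx
 * descriptor, copy the packet into its buffer, flush the buffer, hand
 * the descriptor back to the DMA engine and kick it with a Tx poll
 * demand.  Cache maintenance brackets every descriptor and buffer
 * access because the ring is shared with the hardware.
 */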
static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end = data_start +
		roundup(length, ARCH_DMA_MINALIGN);

	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire
	 * descriptor, which is 16 bytes in total. This is safe because
	 * the individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -1;
	}

	memcpy(desc_p->dmamac_addr, packet, length);

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl |= (length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK;

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl |= ((length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST | \
			       DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}
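
/*
 * Receive path: if the DMA engine has released the current Rx
 * descriptor, extract the frame length from the status word, invalidate
 * the data buffer, pass the frame to the network stack, then give the
 * descriptor back to the DMA engine and advance the ring index.
 */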
static int dw_eth_recv(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = 0;
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >> \
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);

		net_process_received_packet(desc_p->dmamac_addr, length);

		/*
		 * Make the current descriptor valid again and go to
		 * the next one
		 */
		desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

		/* Flush only status field - others weren't changed */
		flush_dcache_range(desc_start, desc_end);

		/* Test the wrap-around condition. */
		if (++desc_num >= CONFIG_RX_DESCR_NUM)
			desc_num = 0;
	}

	priv->rx_currdescnum = desc_num;

	return length;
}
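
/*
 * Locate and set up the PHY: scan the MDIO bus (optionally restricted
 * to CONFIG_PHY_ADDR), attach it to this device, limit the advertised
 * features to what a gigabit MAC supports and run the PHY's config
 * step.
 */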
static int dw_phy_init(struct eth_device *dev)
{
	struct dw_eth_dev *priv = dev->priv;
	struct phy_device *phydev;
	int mask = 0xffffffff;

#ifdef CONFIG_PHY_ADDR
	mask = 1 << CONFIG_PHY_ADDR;
#endif

	phydev = phy_find_by_mask(priv->bus, mask, priv->interface);
	if (!phydev)
		return -1;

	phy_connect_dev(phydev, dev);

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}
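
/*
 * Entry point used by board code: allocate the eth_device and the
 * DMA-aligned private state, register the device with the network core
 * and its MDIO bus with the PHY framework, then probe the PHY.  A
 * board's board_eth_init() would typically call something like
 * designware_initialize(base, PHY_INTERFACE_MODE_RGMII) with its own
 * register base and PHY interface mode (example values only).
 */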
int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a
	 * strict buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(dev);
}