Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/Kconfig | 6
-rw-r--r-- | drivers/net/Makefile | 1
-rw-r--r-- | drivers/net/bcm-sf2-eth-gmac.c | 113
-rw-r--r-- | drivers/net/bcm-sf2-eth.h | 4
-rw-r--r-- | drivers/net/enc28j60.c | 17
-rw-r--r-- | drivers/net/gmac_rockchip.c | 140
-rw-r--r-- | drivers/net/greth.c | 677
-rw-r--r-- | drivers/net/greth.h | 81
-rw-r--r-- | drivers/net/ldpaa_eth/ls2080a.c | 30
-rw-r--r-- | drivers/net/mvneta.c | 125
-rw-r--r-- | drivers/net/mvpp2.c | 1845
-rw-r--r-- | drivers/net/phy/Kconfig | 10
-rw-r--r-- | drivers/net/phy/Makefile | 1
-rw-r--r-- | drivers/net/phy/fixed.c | 82
-rw-r--r-- | drivers/net/phy/phy.c | 23
-rw-r--r-- | drivers/net/smc91111.h | 7
-rw-r--r-- | drivers/net/sun8i_emac.c | 88
-rw-r--r-- | drivers/net/sunxi_emac.c | 19
18 files changed, 2132 insertions, 1137 deletions
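Among the first hunks below, the bcm-sf2-eth-gmac.c changes replace malloc() plus manual 16-byte alignment with memalign(ARCH_DMA_MINALIGN, ...) and pad every descriptor and buffer size up to a multiple of ARCH_DMA_MINALIGN, so each dcache flush covers whole cache lines without spilling into a neighbouring allocation. A minimal standalone sketch of that sizing pattern follows; it uses the glibc memalign() in place of U-Boot's, and the buffer size and 64-byte cache line are assumed values, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>	/* glibc memalign(), standing in for U-Boot's */

/* Same rounding helper U-Boot's ALIGN() provides */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ARCH_DMA_MINALIGN	64	/* assumed cache-line size */

#define RX_BUF_SIZE		2048	/* illustrative value, not the driver's */
#define RX_BUF_NUM		16
#define RX_BUF_SIZE_ALIGNED	ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)

int main(void)
{
	/*
	 * memalign() guarantees the start address is cache-line aligned;
	 * padding each buffer to RX_BUF_SIZE_ALIGNED keeps every per-buffer
	 * flush range from touching its neighbour's cache lines.
	 */
	void *rx = memalign(ARCH_DMA_MINALIGN,
			    (size_t)RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	if (!rx)
		return 1;
	printf("buffer stride: %d bytes\n", RX_BUF_SIZE_ALIGNED);
	free(rx);
	return 0;
}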
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 70e36611ea..8aa92790f4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -124,12 +124,12 @@ config FEC_MXC NXP i.MX processors. config MVPP2 - bool "Marvell Armada 375 network interface support" - depends on ARMADA_375 + bool "Marvell Armada 375/7K/8K network interface support" + depends on ARMADA_375 || ARMADA_8K select PHYLIB help This driver supports the network interface units in the - Marvell ARMADA 375 SoC. + Marvell ARMADA 375, 7K and 8K SoCs. config MACB bool "Cadence MACB/GEM Ethernet Interface" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 2493a48b88..ac7e07bfdf 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_FSLDMAFEC) += fsl_mcdmafec.o mcfmii.o obj-$(CONFIG_FTGMAC100) += ftgmac100.o obj-$(CONFIG_FTMAC110) += ftmac110.o obj-$(CONFIG_FTMAC100) += ftmac100.o -obj-$(CONFIG_GRETH) += greth.o obj-$(CONFIG_GMAC_ROCKCHIP) += gmac_rockchip.o obj-$(CONFIG_DRIVER_TI_KEYSTONE_NET) += keystone_net.o obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o diff --git a/drivers/net/bcm-sf2-eth-gmac.c b/drivers/net/bcm-sf2-eth-gmac.c index f2853cfad2..9ff72fa1ed 100644 --- a/drivers/net/bcm-sf2-eth-gmac.c +++ b/drivers/net/bcm-sf2-eth-gmac.c @@ -1,5 +1,5 @@ /* - * Copyright 2014 Broadcom Corporation. + * Copyright 2014-2017 Broadcom. * * SPDX-License-Identifier: GPL-2.0+ */ @@ -28,6 +28,10 @@ } \ } +#define RX_BUF_SIZE_ALIGNED ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN) +#define TX_BUF_SIZE_ALIGNED ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN) +#define DESCP_SIZE_ALIGNED ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN) + static int gmac_disable_dma(struct eth_dma *dma, int dir); static int gmac_enable_dma(struct eth_dma *dma, int dir); @@ -114,7 +118,7 @@ static void dma_tx_dump(struct eth_dma *dma) printf("TX Buffers:\n"); /* Initialize TX DMA descriptor table */ for (i = 0; i < TX_BUF_NUM; i++) { - bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE); + bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED); printf("buf%d:0x%x; ", i, (uint32_t)bufp); } printf("\n"); @@ -145,7 +149,7 @@ static void dma_rx_dump(struct eth_dma *dma) printf("RX Buffers:\n"); for (i = 0; i < RX_BUF_NUM; i++) { - bufp = dma->rx_buf + i * RX_BUF_SIZE; + bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED; printf("buf%d:0x%x; ", i, (uint32_t)bufp); } printf("\n"); @@ -163,15 +167,15 @@ static int dma_tx_init(struct eth_dma *dma) /* clear descriptor memory */ memset((void *)(dma->tx_desc_aligned), 0, - TX_BUF_NUM * sizeof(dma64dd_t)); - memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE); + TX_BUF_NUM * DESCP_SIZE_ALIGNED); + memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED); /* Initialize TX DMA descriptor table */ for (i = 0; i < TX_BUF_NUM; i++) { descp = (dma64dd_t *)(dma->tx_desc_aligned) + i; - bufp = dma->tx_buf + i * TX_BUF_SIZE; + bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED; /* clear buffer memory */ - memset((void *)bufp, 0, TX_BUF_SIZE); + memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED); ctrl = 0; /* if last descr set endOfTable */ @@ -187,10 +191,11 @@ static int dma_tx_init(struct eth_dma *dma) descp = dma->tx_desc_aligned; bufp = dma->tx_buf; flush_dcache_range((unsigned long)descp, - (unsigned long)(descp + - sizeof(dma64dd_t) * TX_BUF_NUM)); - flush_dcache_range((unsigned long)(bufp), - (unsigned long)(bufp + TX_BUF_SIZE * TX_BUF_NUM)); + (unsigned long)descp + + DESCP_SIZE_ALIGNED * TX_BUF_NUM); + flush_dcache_range((unsigned long)bufp, + (unsigned long)bufp + + TX_BUF_SIZE_ALIGNED * TX_BUF_NUM); /* 
initialize the DMA channel */ writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR); @@ -215,20 +220,20 @@ static int dma_rx_init(struct eth_dma *dma) /* clear descriptor memory */ memset((void *)(dma->rx_desc_aligned), 0, - RX_BUF_NUM * sizeof(dma64dd_t)); + RX_BUF_NUM * DESCP_SIZE_ALIGNED); /* clear buffer memory */ - memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE); + memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED); /* Initialize RX DMA descriptor table */ for (i = 0; i < RX_BUF_NUM; i++) { descp = (dma64dd_t *)(dma->rx_desc_aligned) + i; - bufp = dma->rx_buf + i * RX_BUF_SIZE; + bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED; ctrl = 0; /* if last descr set endOfTable */ if (i == (RX_BUF_NUM - 1)) ctrl = D64_CTRL1_EOT; descp->ctrl1 = ctrl; - descp->ctrl2 = RX_BUF_SIZE; + descp->ctrl2 = RX_BUF_SIZE_ALIGNED; descp->addrlow = (uint32_t)bufp; descp->addrhigh = 0; @@ -240,10 +245,11 @@ static int dma_rx_init(struct eth_dma *dma) bufp = dma->rx_buf; /* flush descriptor and buffer */ flush_dcache_range((unsigned long)descp, - (unsigned long)(descp + - sizeof(dma64dd_t) * RX_BUF_NUM)); + (unsigned long)descp + + DESCP_SIZE_ALIGNED * RX_BUF_NUM); flush_dcache_range((unsigned long)(bufp), - (unsigned long)(bufp + RX_BUF_SIZE * RX_BUF_NUM)); + (unsigned long)bufp + + RX_BUF_SIZE_ALIGNED * RX_BUF_NUM); /* initailize the DMA channel */ writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR); @@ -292,14 +298,12 @@ static int dma_deinit(struct eth_dma *dma) free(dma->tx_buf); dma->tx_buf = NULL; - free(dma->tx_desc); - dma->tx_desc = NULL; + free(dma->tx_desc_aligned); dma->tx_desc_aligned = NULL; free(dma->rx_buf); dma->rx_buf = NULL; - free(dma->rx_desc); - dma->rx_desc = NULL; + free(dma->rx_desc_aligned); dma->rx_desc_aligned = NULL; return 0; @@ -307,7 +311,7 @@ static int dma_deinit(struct eth_dma *dma) int gmac_tx_packet(struct eth_dma *dma, void *packet, int length) { - uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE; + uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED; /* kick off the dma */ size_t len = length; @@ -348,10 +352,11 @@ int gmac_tx_packet(struct eth_dma *dma, void *packet, int length) descp->ctrl2 = ctrl; /* flush descriptor and buffer */ - flush_dcache_range((unsigned long)descp, - (unsigned long)(descp + sizeof(dma64dd_t))); + flush_dcache_range((unsigned long)dma->tx_desc_aligned, + (unsigned long)dma->tx_desc_aligned + + DESCP_SIZE_ALIGNED * TX_BUF_NUM); flush_dcache_range((unsigned long)bufp, - (unsigned long)(bufp + TX_BUF_SIZE)); + (unsigned long)bufp + TX_BUF_SIZE_ALIGNED); /* now update the dma last descriptor */ writel(last_desc, GMAC0_DMA_TX_PTR_ADDR); @@ -426,14 +431,15 @@ int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf) ; /* get the packet pointer that corresponds to the rx descriptor */ - bufp = dma->rx_buf + index * RX_BUF_SIZE; + bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED; descp = (dma64dd_t *)(dma->rx_desc_aligned) + index; /* flush descriptor and buffer */ - flush_dcache_range((unsigned long)descp, - (unsigned long)(descp + sizeof(dma64dd_t))); + flush_dcache_range((unsigned long)dma->rx_desc_aligned, + (unsigned long)dma->rx_desc_aligned + + DESCP_SIZE_ALIGNED * RX_BUF_NUM); flush_dcache_range((unsigned long)bufp, - (unsigned long)(bufp + RX_BUF_SIZE)); + (unsigned long)bufp + RX_BUF_SIZE_ALIGNED); buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK); @@ -457,12 +463,13 @@ int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf) memcpy(buf, datap, rcvlen); /* update descriptor that is being added back 
on ring */ - descp->ctrl2 = RX_BUF_SIZE; + descp->ctrl2 = RX_BUF_SIZE_ALIGNED; descp->addrlow = (uint32_t)bufp; descp->addrhigh = 0; /* flush descriptor */ - flush_dcache_range((unsigned long)descp, - (unsigned long)(descp + sizeof(dma64dd_t))); + flush_dcache_range((unsigned long)dma->rx_desc_aligned, + (unsigned long)dma->rx_desc_aligned + + DESCP_SIZE_ALIGNED * RX_BUF_NUM); /* set the lastdscr for the rx ring */ writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR); @@ -573,7 +580,7 @@ static int gmac_enable_dma(struct eth_dma *dma, int dir) * set the lastdscr for the rx ring */ writel(((uint32_t)(dma->rx_desc_aligned) + - (RX_BUF_NUM - 1) * RX_BUF_SIZE) & + (RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR); } @@ -893,54 +900,52 @@ int gmac_add(struct eth_device *dev) void *tmp; /* - * Desc has to be 16-byte aligned ? - * If it is 8-byte aligned by malloc, fail Tx + * Desc has to be 16-byte aligned. But for dcache flush it must be + * aligned to ARCH_DMA_MINALIGN. */ - tmp = malloc(sizeof(dma64dd_t) * TX_BUF_NUM + 8); + tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM); if (tmp == NULL) { printf("%s: Failed to allocate TX desc Buffer\n", __func__); return -1; } - dma->tx_desc = (void *)tmp; - dma->tx_desc_aligned = (void *)(((uint32_t)tmp) & (~0xf)); + dma->tx_desc_aligned = (void *)tmp; debug("TX Descriptor Buffer: %p; length: 0x%x\n", - dma->tx_desc_aligned, sizeof(dma64dd_t) * TX_BUF_NUM); + dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM); - tmp = malloc(TX_BUF_SIZE * TX_BUF_NUM); + tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM); if (tmp == NULL) { printf("%s: Failed to allocate TX Data Buffer\n", __func__); - free(dma->tx_desc); + free(dma->tx_desc_aligned); return -1; } dma->tx_buf = (uint8_t *)tmp; debug("TX Data Buffer: %p; length: 0x%x\n", - dma->tx_buf, TX_BUF_SIZE * TX_BUF_NUM); + dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM); - /* Desc has to be 16-byte aligned ? */ - tmp = malloc(sizeof(dma64dd_t) * RX_BUF_NUM + 8); + /* Desc has to be 16-byte aligned */ + tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM); if (tmp == NULL) { printf("%s: Failed to allocate RX Descriptor\n", __func__); - free(dma->tx_desc); + free(dma->tx_desc_aligned); free(dma->tx_buf); return -1; } - dma->rx_desc = tmp; - dma->rx_desc_aligned = (void *)(((uint32_t)tmp) & (~0xf)); + dma->rx_desc_aligned = (void *)tmp; debug("RX Descriptor Buffer: %p, length: 0x%x\n", - dma->rx_desc_aligned, sizeof(dma64dd_t) * RX_BUF_NUM); + dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM); - tmp = malloc(RX_BUF_SIZE * RX_BUF_NUM); + tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM); if (tmp == NULL) { printf("%s: Failed to allocate RX Data Buffer\n", __func__); - free(dma->tx_desc); + free(dma->tx_desc_aligned); free(dma->tx_buf); - free(dma->rx_desc); + free(dma->rx_desc_aligned); return -1; } - dma->rx_buf = tmp; + dma->rx_buf = (uint8_t *)tmp; debug("RX Data Buffer: %p; length: 0x%x\n", - dma->rx_buf, RX_BUF_SIZE * RX_BUF_NUM); + dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM); g_dmactrlflags = 0; diff --git a/drivers/net/bcm-sf2-eth.h b/drivers/net/bcm-sf2-eth.h index 6104affc51..c4e2e01003 100644 --- a/drivers/net/bcm-sf2-eth.h +++ b/drivers/net/bcm-sf2-eth.h @@ -1,5 +1,5 @@ /* - * Copyright 2014 Broadcom Corporation. + * Copyright 2014-2017 Broadcom. 
* * SPDX-License-Identifier: GPL-2.0+ */ @@ -30,8 +30,6 @@ enum { struct eth_dma { void *tx_desc_aligned; void *rx_desc_aligned; - void *tx_desc; - void *rx_desc; uint8_t *tx_buf; uint8_t *rx_buf; diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c index 2fe323a85a..588a84d7a9 100644 --- a/drivers/net/enc28j60.c +++ b/drivers/net/enc28j60.c @@ -489,10 +489,6 @@ static void enc_poll(enc_dev_t *enc) u8 eir_reg; u8 pkt_cnt; -#ifdef CONFIG_USE_IRQ - /* clear global interrupt enable bit in enc28j60 */ - enc_bclr(enc, CTL_REG_EIE, ENC_EIE_INTIE); -#endif (void)enc_r8(enc, CTL_REG_ESTAT); eir_reg = enc_r8(enc, CTL_REG_EIR); if (eir_reg & ENC_EIR_TXIF) { @@ -520,10 +516,6 @@ static void enc_poll(enc_dev_t *enc) printf("%s: tx error\n", enc->dev->name); enc_bclr(enc, CTL_REG_EIR, ENC_EIR_TXERIF); } -#ifdef CONFIG_USE_IRQ - /* set global interrupt enable bit in enc28j60 */ - enc_bset(enc, CTL_REG_EIE, ENC_EIE_INTIE); -#endif } /* @@ -693,15 +685,6 @@ static int enc_setup(enc_dev_t *enc) /* Reset PDPXMD-bit => half duplex */ enc_phy_write(enc, PHY_REG_PHCON1, 0); -#ifdef CONFIG_USE_IRQ - /* enable interrupts */ - enc_bset(enc, CTL_REG_EIE, ENC_EIE_PKTIE); - enc_bset(enc, CTL_REG_EIE, ENC_EIE_TXIE); - enc_bset(enc, CTL_REG_EIE, ENC_EIE_RXERIE); - enc_bset(enc, CTL_REG_EIE, ENC_EIE_TXERIE); - enc_bset(enc, CTL_REG_EIE, ENC_EIE_INTIE); -#endif - return 0; } diff --git a/drivers/net/gmac_rockchip.c b/drivers/net/gmac_rockchip.c index e9b202ab9a..5e2ca76302 100644 --- a/drivers/net/gmac_rockchip.c +++ b/drivers/net/gmac_rockchip.c @@ -14,7 +14,9 @@ #include <asm/io.h> #include <asm/arch/periph.h> #include <asm/arch/clock.h> +#include <asm/arch/hardware.h> #include <asm/arch/grf_rk3288.h> +#include <asm/arch/grf_rk3399.h> #include <dm/pinctrl.h> #include <dt-bindings/clock/rk3288-cru.h> #include "designware.h" @@ -32,32 +34,45 @@ struct gmac_rockchip_platdata { int rx_delay; }; +struct rk_gmac_ops { + int (*fix_mac_speed)(struct dw_eth_dev *priv); + void (*set_to_rgmii)(struct gmac_rockchip_platdata *pdata); +}; + + static int gmac_rockchip_ofdata_to_platdata(struct udevice *dev) { struct gmac_rockchip_platdata *pdata = dev_get_platdata(dev); + const void *blob = gd->fdt_blob; + int node = dev_of_offset(dev); + + /* Check the new naming-style first... */ + pdata->tx_delay = fdtdec_get_int(blob, node, "tx_delay", -ENOENT); + pdata->rx_delay = fdtdec_get_int(blob, node, "rx_delay", -ENOENT); - pdata->tx_delay = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), - "tx-delay", 0x30); - pdata->rx_delay = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), - "rx-delay", 0x10); + /* ... 
and fall back to the old naming style or default, if necessary */ + if (pdata->tx_delay == -ENOENT) + pdata->tx_delay = fdtdec_get_int(blob, node, "tx-delay", 0x30); + if (pdata->rx_delay == -ENOENT) + pdata->rx_delay = fdtdec_get_int(blob, node, "rx-delay", 0x10); return designware_eth_ofdata_to_platdata(dev); } -static int gmac_rockchip_fix_mac_speed(struct dw_eth_dev *priv) +static int rk3288_gmac_fix_mac_speed(struct dw_eth_dev *priv) { struct rk3288_grf *grf; int clk; switch (priv->phydev->speed) { case 10: - clk = GMAC_CLK_SEL_2_5M; + clk = RK3288_GMAC_CLK_SEL_2_5M; break; case 100: - clk = GMAC_CLK_SEL_25M; + clk = RK3288_GMAC_CLK_SEL_25M; break; case 1000: - clk = GMAC_CLK_SEL_125M; + clk = RK3288_GMAC_CLK_SEL_125M; break; default: debug("Unknown phy speed: %d\n", priv->phydev->speed); @@ -65,17 +80,83 @@ static int gmac_rockchip_fix_mac_speed(struct dw_eth_dev *priv) } grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF); - rk_clrsetreg(&grf->soc_con1, - GMAC_CLK_SEL_MASK << GMAC_CLK_SEL_SHIFT, - clk << GMAC_CLK_SEL_SHIFT); + rk_clrsetreg(&grf->soc_con1, RK3288_GMAC_CLK_SEL_MASK, clk); + + return 0; +} + +static int rk3399_gmac_fix_mac_speed(struct dw_eth_dev *priv) +{ + struct rk3399_grf_regs *grf; + int clk; + + switch (priv->phydev->speed) { + case 10: + clk = RK3399_GMAC_CLK_SEL_2_5M; + break; + case 100: + clk = RK3399_GMAC_CLK_SEL_25M; + break; + case 1000: + clk = RK3399_GMAC_CLK_SEL_125M; + break; + default: + debug("Unknown phy speed: %d\n", priv->phydev->speed); + return -EINVAL; + } + + grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF); + rk_clrsetreg(&grf->soc_con5, RK3399_GMAC_CLK_SEL_MASK, clk); return 0; } +static void rk3288_gmac_set_to_rgmii(struct gmac_rockchip_platdata *pdata) +{ + struct rk3288_grf *grf; + + grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF); + rk_clrsetreg(&grf->soc_con1, + RK3288_RMII_MODE_MASK | RK3288_GMAC_PHY_INTF_SEL_MASK, + RK3288_GMAC_PHY_INTF_SEL_RGMII); + + rk_clrsetreg(&grf->soc_con3, + RK3288_RXCLK_DLY_ENA_GMAC_MASK | + RK3288_TXCLK_DLY_ENA_GMAC_MASK | + RK3288_CLK_RX_DL_CFG_GMAC_MASK | + RK3288_CLK_TX_DL_CFG_GMAC_MASK, + RK3288_RXCLK_DLY_ENA_GMAC_ENABLE | + RK3288_TXCLK_DLY_ENA_GMAC_ENABLE | + pdata->rx_delay << RK3288_CLK_RX_DL_CFG_GMAC_SHIFT | + pdata->tx_delay << RK3288_CLK_TX_DL_CFG_GMAC_SHIFT); +} + +static void rk3399_gmac_set_to_rgmii(struct gmac_rockchip_platdata *pdata) +{ + struct rk3399_grf_regs *grf; + + grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF); + + rk_clrsetreg(&grf->soc_con5, + RK3399_GMAC_PHY_INTF_SEL_MASK, + RK3399_GMAC_PHY_INTF_SEL_RGMII); + + rk_clrsetreg(&grf->soc_con6, + RK3399_RXCLK_DLY_ENA_GMAC_MASK | + RK3399_TXCLK_DLY_ENA_GMAC_MASK | + RK3399_CLK_RX_DL_CFG_GMAC_MASK | + RK3399_CLK_TX_DL_CFG_GMAC_MASK, + RK3399_RXCLK_DLY_ENA_GMAC_ENABLE | + RK3399_TXCLK_DLY_ENA_GMAC_ENABLE | + pdata->rx_delay << RK3399_CLK_RX_DL_CFG_GMAC_SHIFT | + pdata->tx_delay << RK3399_CLK_TX_DL_CFG_GMAC_SHIFT); +} + static int gmac_rockchip_probe(struct udevice *dev) { struct gmac_rockchip_platdata *pdata = dev_get_platdata(dev); - struct rk3288_grf *grf; + struct rk_gmac_ops *ops = + (struct rk_gmac_ops *)dev_get_driver_data(dev); struct clk clk; int ret; @@ -89,21 +170,7 @@ static int gmac_rockchip_probe(struct udevice *dev) return ret; /* Set to RGMII mode */ - grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF); - rk_clrsetreg(&grf->soc_con1, - RMII_MODE_MASK << RMII_MODE_SHIFT | - GMAC_PHY_INTF_SEL_MASK << GMAC_PHY_INTF_SEL_SHIFT, - GMAC_PHY_INTF_SEL_RGMII << GMAC_PHY_INTF_SEL_SHIFT); - - rk_clrsetreg(&grf->soc_con3, - 
RXCLK_DLY_ENA_GMAC_MASK << RXCLK_DLY_ENA_GMAC_SHIFT | - TXCLK_DLY_ENA_GMAC_MASK << TXCLK_DLY_ENA_GMAC_SHIFT | - CLK_RX_DL_CFG_GMAC_MASK << CLK_RX_DL_CFG_GMAC_SHIFT | - CLK_TX_DL_CFG_GMAC_MASK << CLK_TX_DL_CFG_GMAC_SHIFT, - RXCLK_DLY_ENA_GMAC_ENABLE << RXCLK_DLY_ENA_GMAC_SHIFT | - TXCLK_DLY_ENA_GMAC_ENABLE << TXCLK_DLY_ENA_GMAC_SHIFT | - pdata->rx_delay << CLK_RX_DL_CFG_GMAC_SHIFT | - pdata->tx_delay << CLK_TX_DL_CFG_GMAC_SHIFT); + ops->set_to_rgmii(pdata); return designware_eth_probe(dev); } @@ -112,12 +179,14 @@ static int gmac_rockchip_eth_start(struct udevice *dev) { struct eth_pdata *pdata = dev_get_platdata(dev); struct dw_eth_dev *priv = dev_get_priv(dev); + struct rk_gmac_ops *ops = + (struct rk_gmac_ops *)dev_get_driver_data(dev); int ret; ret = designware_eth_init(priv, pdata->enetaddr); if (ret) return ret; - ret = gmac_rockchip_fix_mac_speed(priv); + ret = ops->fix_mac_speed(priv); if (ret) return ret; ret = designware_eth_enable(priv); @@ -136,8 +205,21 @@ const struct eth_ops gmac_rockchip_eth_ops = { .write_hwaddr = designware_eth_write_hwaddr, }; +const struct rk_gmac_ops rk3288_gmac_ops = { + .fix_mac_speed = rk3288_gmac_fix_mac_speed, + .set_to_rgmii = rk3288_gmac_set_to_rgmii, +}; + +const struct rk_gmac_ops rk3399_gmac_ops = { + .fix_mac_speed = rk3399_gmac_fix_mac_speed, + .set_to_rgmii = rk3399_gmac_set_to_rgmii, +}; + static const struct udevice_id rockchip_gmac_ids[] = { - { .compatible = "rockchip,rk3288-gmac" }, + { .compatible = "rockchip,rk3288-gmac", + .data = (ulong)&rk3288_gmac_ops }, + { .compatible = "rockchip,rk3399-gmac", + .data = (ulong)&rk3399_gmac_ops }, { } }; diff --git a/drivers/net/greth.c b/drivers/net/greth.c deleted file mode 100644 index aa5d7114a5..0000000000 --- a/drivers/net/greth.c +++ /dev/null @@ -1,677 +0,0 @@ -/* Gaisler.com GRETH 10/100/1000 Ethernet MAC driver - * - * Driver use polling mode (no Interrupt) - * - * (C) Copyright 2007 - * Daniel Hellstrom, Gaisler Research, daniel@gaisler.com - * - * SPDX-License-Identifier: GPL-2.0+ - */ - -/* #define DEBUG */ - -#include <common.h> -#include <command.h> -#include <errno.h> -#include <net.h> -#include <netdev.h> -#include <malloc.h> -#include <asm/processor.h> -#include <ambapp.h> -#include <asm/leon.h> - -#include <grlib/greth.h> - -/* Default to 3s timeout on autonegotiation */ -#ifndef GRETH_PHY_TIMEOUT_MS -#define GRETH_PHY_TIMEOUT_MS 3000 -#endif - -/* Default to PHY adrress 0 not not specified */ -#ifdef CONFIG_SYS_GRLIB_GRETH_PHYADDR -#define GRETH_PHY_ADR_DEFAULT CONFIG_SYS_GRLIB_GRETH_PHYADDR -#else -#define GRETH_PHY_ADR_DEFAULT 0 -#endif - -/* Let board select which GRETH to use as network interface, set - * this to zero if only one GRETH is available. 
- */ -#ifndef CONFIG_SYS_GRLIB_GRETH_INDEX -#define CONFIG_SYS_GRLIB_GRETH_INDEX 0 -#endif - -/* ByPass Cache when reading regs */ -#define GRETH_REGLOAD(addr) SPARC_NOCACHE_READ(addr) -/* Write-through cache ==> no bypassing needed on writes */ -#define GRETH_REGSAVE(addr,data) (*(volatile unsigned int *)(addr) = (data)) -#define GRETH_REGORIN(addr,data) GRETH_REGSAVE(addr,GRETH_REGLOAD(addr)|data) -#define GRETH_REGANDIN(addr,data) GRETH_REGSAVE(addr,GRETH_REGLOAD(addr)&data) - -#define GRETH_RXBD_CNT 4 -#define GRETH_TXBD_CNT 1 - -#define GRETH_RXBUF_SIZE 1540 -#define GRETH_BUF_ALIGN 4 -#define GRETH_RXBUF_EFF_SIZE \ - ( (GRETH_RXBUF_SIZE&~(GRETH_BUF_ALIGN-1))+GRETH_BUF_ALIGN ) - -typedef struct { - greth_regs *regs; - int irq; - struct eth_device *dev; - - /* Hardware info */ - unsigned char phyaddr; - int gbit_mac; - - /* Current operating Mode */ - int gb; /* GigaBit */ - int fd; /* Full Duplex */ - int sp; /* 10/100Mbps speed (1=100,0=10) */ - int auto_neg; /* Auto negotiate done */ - - unsigned char hwaddr[6]; /* MAC Address */ - - /* Descriptors */ - greth_bd *rxbd_base, *rxbd_max; - greth_bd *txbd_base, *txbd_max; - - greth_bd *rxbd_curr; - - /* rx buffers in rx descriptors */ - void *rxbuf_base; /* (GRETH_RXBUF_SIZE+ALIGNBYTES) * GRETH_RXBD_CNT */ - - /* unused for gbit_mac, temp buffer for sending packets with unligned - * start. - * Pointer to packet allocated with malloc. - */ - void *txbuf; - - struct { - /* rx status */ - unsigned int rx_packets, - rx_crc_errors, rx_frame_errors, rx_length_errors, rx_errors; - - /* tx stats */ - unsigned int tx_packets, - tx_latecol_errors, - tx_underrun_errors, tx_limit_errors, tx_errors; - } stats; -} greth_priv; - -/* Read MII register 'addr' from core 'regs' */ -static int read_mii(int phyaddr, int regaddr, volatile greth_regs * regs) -{ - while (GRETH_REGLOAD(®s->mdio) & GRETH_MII_BUSY) { - } - - GRETH_REGSAVE(®s->mdio, ((phyaddr & 0x1F) << 11) | ((regaddr & 0x1F) << 6) | 2); - - while (GRETH_REGLOAD(®s->mdio) & GRETH_MII_BUSY) { - } - - if (!(GRETH_REGLOAD(®s->mdio) & GRETH_MII_NVALID)) { - return (GRETH_REGLOAD(®s->mdio) >> 16) & 0xFFFF; - } else { - return -1; - } -} - -static void write_mii(int phyaddr, int regaddr, int data, volatile greth_regs * regs) -{ - while (GRETH_REGLOAD(®s->mdio) & GRETH_MII_BUSY) { - } - - GRETH_REGSAVE(®s->mdio, - ((data & 0xFFFF) << 16) | ((phyaddr & 0x1F) << 11) | - ((regaddr & 0x1F) << 6) | 1); - - while (GRETH_REGLOAD(®s->mdio) & GRETH_MII_BUSY) { - } - -} - -/* init/start hardware and allocate descriptor buffers for rx side - * - */ -int greth_init(struct eth_device *dev, bd_t * bis) -{ - int i; - - greth_priv *greth = dev->priv; - greth_regs *regs = greth->regs; - - debug("greth_init\n"); - - /* Reset core */ - GRETH_REGSAVE(®s->control, (GRETH_RESET | (greth->gb << 8) | - (greth->sp << 7) | (greth->fd << 4))); - - /* Wait for Reset to complete */ - while ( GRETH_REGLOAD(®s->control) & GRETH_RESET) ; - - GRETH_REGSAVE(®s->control, - ((greth->gb << 8) | (greth->sp << 7) | (greth->fd << 4))); - - if (!greth->rxbd_base) { - - /* allocate descriptors */ - greth->rxbd_base = (greth_bd *) - memalign(0x1000, GRETH_RXBD_CNT * sizeof(greth_bd)); - greth->txbd_base = (greth_bd *) - memalign(0x1000, GRETH_TXBD_CNT * sizeof(greth_bd)); - - /* allocate buffers to all descriptors */ - greth->rxbuf_base = - malloc(GRETH_RXBUF_EFF_SIZE * GRETH_RXBD_CNT); - } - - /* initate rx decriptors */ - for (i = 0; i < GRETH_RXBD_CNT; i++) { - greth->rxbd_base[i].addr = (unsigned int) - greth->rxbuf_base + 
(GRETH_RXBUF_EFF_SIZE * i); - /* enable desciptor & set wrap bit if last descriptor */ - if (i >= (GRETH_RXBD_CNT - 1)) { - greth->rxbd_base[i].stat = GRETH_BD_EN | GRETH_BD_WR; - } else { - greth->rxbd_base[i].stat = GRETH_BD_EN; - } - } - - /* initiate indexes */ - greth->rxbd_curr = greth->rxbd_base; - greth->rxbd_max = greth->rxbd_base + (GRETH_RXBD_CNT - 1); - greth->txbd_max = greth->txbd_base + (GRETH_TXBD_CNT - 1); - /* - * greth->txbd_base->addr = 0; - * greth->txbd_base->stat = GRETH_BD_WR; - */ - - /* initate tx decriptors */ - for (i = 0; i < GRETH_TXBD_CNT; i++) { - greth->txbd_base[i].addr = 0; - /* enable desciptor & set wrap bit if last descriptor */ - if (i >= (GRETH_TXBD_CNT - 1)) { - greth->txbd_base[i].stat = GRETH_BD_WR; - } else { - greth->txbd_base[i].stat = 0; - } - } - - /**** SET HARDWARE REGS ****/ - - /* Set pointer to tx/rx descriptor areas */ - GRETH_REGSAVE(®s->rx_desc_p, (unsigned int)&greth->rxbd_base[0]); - GRETH_REGSAVE(®s->tx_desc_p, (unsigned int)&greth->txbd_base[0]); - - /* Enable Transmitter, GRETH will now scan descriptors for packets - * to transmitt */ - debug("greth_init: enabling receiver\n"); - GRETH_REGORIN(®s->control, GRETH_RXEN); - - return 0; -} - -/* Initiate PHY to a relevant speed - * return: - * - 0 = success - * - 1 = timeout/fail - */ -int greth_init_phy(greth_priv * dev, bd_t * bis) -{ - greth_regs *regs = dev->regs; - int tmp, tmp1, tmp2, i; - unsigned int start, timeout; - int phyaddr = GRETH_PHY_ADR_DEFAULT; - -#ifndef CONFIG_SYS_GRLIB_GRETH_PHYADDR - /* If BSP doesn't provide a hardcoded PHY address the driver will - * try to autodetect PHY address by stopping the search on the first - * PHY address which has REG0 implemented. - */ - for (i=0; i<32; i++) { - tmp = read_mii(i, 0, regs); - if ( (tmp != 0) && (tmp != 0xffff) ) { - phyaddr = i; - break; - } - } -#endif - - /* Save PHY Address */ - dev->phyaddr = phyaddr; - - debug("GRETH PHY ADDRESS: %d\n", phyaddr); - - /* X msecs to ticks */ - timeout = GRETH_PHY_TIMEOUT_MS * 1000; - - /* Get system timer0 current value - * Total timeout is 5s - */ - start = get_timer(0); - - /* get phy control register default values */ - - while ((tmp = read_mii(phyaddr, 0, regs)) & 0x8000) { - if (get_timer(start) > timeout) { - debug("greth_init_phy: PHY read 1 failed\n"); - return 1; /* Fail */ - } - } - - /* reset PHY and wait for completion */ - write_mii(phyaddr, 0, 0x8000 | tmp, regs); - - while (((tmp = read_mii(phyaddr, 0, regs))) & 0x8000) { - if (get_timer(start) > timeout) { - debug("greth_init_phy: PHY read 2 failed\n"); - return 1; /* Fail */ - } - } - - /* Check if PHY is autoneg capable and then determine operating - * mode, otherwise force it to 10 Mbit halfduplex - */ - dev->gb = 0; - dev->fd = 0; - dev->sp = 0; - dev->auto_neg = 0; - if (!((tmp >> 12) & 1)) { - write_mii(phyaddr, 0, 0, regs); - } else { - /* wait for auto negotiation to complete and then check operating mode */ - dev->auto_neg = 1; - i = 0; - while (!(((tmp = read_mii(phyaddr, 1, regs)) >> 5) & 1)) { - if (get_timer(start) > timeout) { - printf("Auto negotiation timed out. 
" - "Selecting default config\n"); - tmp = read_mii(phyaddr, 0, regs); - dev->gb = ((tmp >> 6) & 1) - && !((tmp >> 13) & 1); - dev->sp = !((tmp >> 6) & 1) - && ((tmp >> 13) & 1); - dev->fd = (tmp >> 8) & 1; - goto auto_neg_done; - } - } - if ((tmp >> 8) & 1) { - tmp1 = read_mii(phyaddr, 9, regs); - tmp2 = read_mii(phyaddr, 10, regs); - if ((tmp1 & GRETH_MII_EXTADV_1000FD) && - (tmp2 & GRETH_MII_EXTPRT_1000FD)) { - dev->gb = 1; - dev->fd = 1; - } - if ((tmp1 & GRETH_MII_EXTADV_1000HD) && - (tmp2 & GRETH_MII_EXTPRT_1000HD)) { - dev->gb = 1; - dev->fd = 0; - } - } - if ((dev->gb == 0) || ((dev->gb == 1) && (dev->gbit_mac == 0))) { - tmp1 = read_mii(phyaddr, 4, regs); - tmp2 = read_mii(phyaddr, 5, regs); - if ((tmp1 & GRETH_MII_100TXFD) && - (tmp2 & GRETH_MII_100TXFD)) { - dev->sp = 1; - dev->fd = 1; - } - if ((tmp1 & GRETH_MII_100TXHD) && - (tmp2 & GRETH_MII_100TXHD)) { - dev->sp = 1; - dev->fd = 0; - } - if ((tmp1 & GRETH_MII_10FD) && (tmp2 & GRETH_MII_10FD)) { - dev->fd = 1; - } - if ((dev->gb == 1) && (dev->gbit_mac == 0)) { - dev->gb = 0; - dev->fd = 0; - write_mii(phyaddr, 0, dev->sp << 13, regs); - } - } - - } - auto_neg_done: - debug("%s GRETH Ethermac at [0x%x] irq %d. Running \ - %d Mbps %s duplex\n", dev->gbit_mac ? "10/100/1000" : "10/100", (unsigned int)(regs), (unsigned int)(dev->irq), dev->gb ? 1000 : (dev->sp ? 100 : 10), dev->fd ? "full" : "half"); - /* Read out PHY info if extended registers are available */ - if (tmp & 1) { - tmp1 = read_mii(phyaddr, 2, regs); - tmp2 = read_mii(phyaddr, 3, regs); - tmp1 = (tmp1 << 6) | ((tmp2 >> 10) & 0x3F); - tmp = tmp2 & 0xF; - - tmp2 = (tmp2 >> 4) & 0x3F; - debug("PHY: Vendor %x Device %x Revision %d\n", tmp1, - tmp2, tmp); - } else { - printf("PHY info not available\n"); - } - - /* set speed and duplex bits in control register */ - GRETH_REGORIN(®s->control, - (dev->gb << 8) | (dev->sp << 7) | (dev->fd << 4)); - - return 0; -} - -void greth_halt(struct eth_device *dev) -{ - greth_priv *greth; - greth_regs *regs; - int i; - - debug("greth_halt\n"); - - if (!dev || !dev->priv) - return; - - greth = dev->priv; - regs = greth->regs; - - if (!regs) - return; - - /* disable receiver/transmitter by clearing the enable bits */ - GRETH_REGANDIN(®s->control, ~(GRETH_RXEN | GRETH_TXEN)); - - /* reset rx/tx descriptors */ - if (greth->rxbd_base) { - for (i = 0; i < GRETH_RXBD_CNT; i++) { - greth->rxbd_base[i].stat = - (i >= (GRETH_RXBD_CNT - 1)) ? GRETH_BD_WR : 0; - } - } - - if (greth->txbd_base) { - for (i = 0; i < GRETH_TXBD_CNT; i++) { - greth->txbd_base[i].stat = - (i >= (GRETH_TXBD_CNT - 1)) ? GRETH_BD_WR : 0; - } - } -} - -int greth_send(struct eth_device *dev, void *eth_data, int data_length) -{ - greth_priv *greth = dev->priv; - greth_regs *regs = greth->regs; - greth_bd *txbd; - void *txbuf; - unsigned int status; - - debug("greth_send\n"); - - /* send data, wait for data to be sent, then return */ - if (((unsigned int)eth_data & (GRETH_BUF_ALIGN - 1)) - && !greth->gbit_mac) { - /* data not aligned as needed by GRETH 10/100, solve this by allocating 4 byte aligned buffer - * and copy data to before giving it to GRETH. - */ - if (!greth->txbuf) { - greth->txbuf = malloc(GRETH_RXBUF_SIZE); - } - - txbuf = greth->txbuf; - - /* copy data info buffer */ - memcpy((char *)txbuf, (char *)eth_data, data_length); - - /* keep buffer to next time */ - } else { - txbuf = (void *)eth_data; - } - /* get descriptor to use, only 1 supported... 
hehe easy */ - txbd = greth->txbd_base; - - /* setup descriptor to wrap around to it self */ - txbd->addr = (unsigned int)txbuf; - txbd->stat = GRETH_BD_EN | GRETH_BD_WR | data_length; - - /* Remind Core which descriptor to use when sending */ - GRETH_REGSAVE(®s->tx_desc_p, (unsigned int)txbd); - - /* initate send by enabling transmitter */ - GRETH_REGORIN(®s->control, GRETH_TXEN); - - /* Wait for data to be sent */ - while ((status = GRETH_REGLOAD(&txbd->stat)) & GRETH_BD_EN) { - ; - } - - /* was the packet transmitted succesfully? */ - if (status & GRETH_TXBD_ERR_AL) { - greth->stats.tx_limit_errors++; - } - - if (status & GRETH_TXBD_ERR_UE) { - greth->stats.tx_underrun_errors++; - } - - if (status & GRETH_TXBD_ERR_LC) { - greth->stats.tx_latecol_errors++; - } - - if (status & - (GRETH_TXBD_ERR_LC | GRETH_TXBD_ERR_UE | GRETH_TXBD_ERR_AL)) { - /* any error */ - greth->stats.tx_errors++; - return -1; - } - - /* bump tx packet counter */ - greth->stats.tx_packets++; - - /* return succefully */ - return 0; -} - -int greth_recv(struct eth_device *dev) -{ - greth_priv *greth = dev->priv; - greth_regs *regs = greth->regs; - greth_bd *rxbd; - unsigned int status, len = 0, bad; - char *d; - int enable = 0; - int i; - - /* Receive One packet only, but clear as many error packets as there are - * available. - */ - { - /* current receive descriptor */ - rxbd = greth->rxbd_curr; - - /* get status of next received packet */ - status = GRETH_REGLOAD(&rxbd->stat); - - bad = 0; - - /* stop if no more packets received */ - if (status & GRETH_BD_EN) { - goto done; - } - - debug("greth_recv: packet 0x%x, 0x%x, len: %d\n", - (unsigned int)rxbd, status, status & GRETH_BD_LEN); - - /* Check status for errors. - */ - if (status & GRETH_RXBD_ERR_FT) { - greth->stats.rx_length_errors++; - bad = 1; - } - if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) { - greth->stats.rx_frame_errors++; - bad = 1; - } - if (status & GRETH_RXBD_ERR_CRC) { - greth->stats.rx_crc_errors++; - bad = 1; - } - if (bad) { - greth->stats.rx_errors++; - printf - ("greth_recv: Bad packet (%d, %d, %d, 0x%08x, %d)\n", - greth->stats.rx_length_errors, - greth->stats.rx_frame_errors, - greth->stats.rx_crc_errors, status, - greth->stats.rx_packets); - /* print all rx descriptors */ - for (i = 0; i < GRETH_RXBD_CNT; i++) { - printf("[%d]: Stat=0x%lx, Addr=0x%lx\n", i, - GRETH_REGLOAD(&greth->rxbd_base[i].stat), - GRETH_REGLOAD(&greth->rxbd_base[i].addr)); - } - } else { - /* Process the incoming packet. */ - len = status & GRETH_BD_LEN; - d = (char *)rxbd->addr; - - debug - ("greth_recv: new packet, length: %d. data: %x %x %x %x %x %x %x %x\n", - len, d[0], d[1], d[2], d[3], d[4], d[5], d[6], - d[7]); - - /* flush all data cache to make sure we're not reading old packet data */ - sparc_dcache_flush_all(); - - /* pass packet on to network subsystem */ - net_process_received_packet((void *)d, len); - - /* bump stats counters */ - greth->stats.rx_packets++; - - /* bad is now 0 ==> will stop loop */ - } - - /* reenable descriptor to receive more packet with this descriptor, wrap around if needed */ - rxbd->stat = - GRETH_BD_EN | - (((unsigned int)greth->rxbd_curr >= - (unsigned int)greth->rxbd_max) ? GRETH_BD_WR : 0); - enable = 1; - - /* increase index */ - greth->rxbd_curr = - ((unsigned int)greth->rxbd_curr >= - (unsigned int)greth->rxbd_max) ? 
greth-> - rxbd_base : (greth->rxbd_curr + 1); - - } - - if (enable) { - GRETH_REGORIN(®s->control, GRETH_RXEN); - } - done: - /* return positive length of packet or 0 if non received */ - return len; -} - -void greth_set_hwaddr(greth_priv * greth, unsigned char *mac) -{ - /* save new MAC address */ - greth->dev->enetaddr[0] = greth->hwaddr[0] = mac[0]; - greth->dev->enetaddr[1] = greth->hwaddr[1] = mac[1]; - greth->dev->enetaddr[2] = greth->hwaddr[2] = mac[2]; - greth->dev->enetaddr[3] = greth->hwaddr[3] = mac[3]; - greth->dev->enetaddr[4] = greth->hwaddr[4] = mac[4]; - greth->dev->enetaddr[5] = greth->hwaddr[5] = mac[5]; - greth->regs->esa_msb = (mac[0] << 8) | mac[1]; - greth->regs->esa_lsb = - (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]; - - debug("GRETH: New MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); -} - -int greth_initialize(bd_t * bis) -{ - greth_priv *greth; - ambapp_apbdev apbdev; - struct eth_device *dev; - int i; - char *addr_str, *end; - unsigned char addr[6]; - - debug("Scanning for GRETH\n"); - - /* Find Device & IRQ via AMBA Plug&Play information, - * CONFIG_SYS_GRLIB_GRETH_INDEX select which GRETH if multiple - * GRETHs in system. - */ - if (ambapp_apb_find(&ambapp_plb, VENDOR_GAISLER, GAISLER_ETHMAC, - CONFIG_SYS_GRLIB_GRETH_INDEX, &apbdev) != 1) { - return -1; /* GRETH not found */ - } - - greth = (greth_priv *) malloc(sizeof(greth_priv)); - dev = (struct eth_device *)malloc(sizeof(struct eth_device)); - memset(dev, 0, sizeof(struct eth_device)); - memset(greth, 0, sizeof(greth_priv)); - - greth->regs = (greth_regs *) apbdev.address; - greth->irq = apbdev.irq; - debug("Found GRETH at %p, irq %d\n", greth->regs, greth->irq); - dev->priv = (void *)greth; - dev->iobase = (unsigned int)greth->regs; - dev->init = greth_init; - dev->halt = greth_halt; - dev->send = greth_send; - dev->recv = greth_recv; - greth->dev = dev; - - /* Reset Core */ - GRETH_REGSAVE(&greth->regs->control, GRETH_RESET); - - /* Wait for core to finish reset cycle */ - while (GRETH_REGLOAD(&greth->regs->control) & GRETH_RESET) ; - - /* Get the phy address which assumed to have been set - correctly with the reset value in hardware */ - greth->phyaddr = (GRETH_REGLOAD(&greth->regs->mdio) >> 11) & 0x1F; - - /* Check if mac is gigabit capable */ - greth->gbit_mac = (GRETH_REGLOAD(&greth->regs->control) >> 27) & 1; - - /* Make descriptor string */ - if (greth->gbit_mac) { - strcpy(dev->name, "GRETH_10/100/GB"); - } else { - strcpy(dev->name, "GRETH_10/100"); - } - - /* initiate PHY, select speed/duplex depending on connected PHY */ - if (greth_init_phy(greth, bis)) { - /* Failed to init PHY (timedout) */ - debug("GRETH[%p]: Failed to init PHY\n", greth->regs); - return -1; - } - - /* Register Device to EtherNet subsystem */ - eth_register(dev); - - /* Get MAC address */ - if ((addr_str = getenv("ethaddr")) != NULL) { - for (i = 0; i < 6; i++) { - addr[i] = - addr_str ? simple_strtoul(addr_str, &end, 16) : 0; - if (addr_str) { - addr_str = (*end) ? 
end + 1 : end; - } - } - } else { - /* No ethaddr set */ - return -EINVAL; - } - - /* set and remember MAC address */ - greth_set_hwaddr(greth, addr); - - debug("GRETH[%p]: Initialized successfully\n", greth->regs); - return 0; -} diff --git a/drivers/net/greth.h b/drivers/net/greth.h deleted file mode 100644 index 5299b2861c..0000000000 --- a/drivers/net/greth.h +++ /dev/null @@ -1,81 +0,0 @@ -/* Gaisler.com GRETH 10/100/1000 Ethernet MAC driver - * - * (C) Copyright 2007 - * Daniel Hellstrom, Gaisler Research, daniel@gaisler.com - * - * SPDX-License-Identifier: GPL-2.0+ - */ - -#define GRETH_FD 0x10 -#define GRETH_RESET 0x40 -#define GRETH_MII_BUSY 0x8 -#define GRETH_MII_NVALID 0x10 - -/* MII registers */ -#define GRETH_MII_EXTADV_1000FD 0x00000200 -#define GRETH_MII_EXTADV_1000HD 0x00000100 -#define GRETH_MII_EXTPRT_1000FD 0x00000800 -#define GRETH_MII_EXTPRT_1000HD 0x00000400 - -#define GRETH_MII_100T4 0x00000200 -#define GRETH_MII_100TXFD 0x00000100 -#define GRETH_MII_100TXHD 0x00000080 -#define GRETH_MII_10FD 0x00000040 -#define GRETH_MII_10HD 0x00000020 - -#define GRETH_BD_EN 0x800 -#define GRETH_BD_WR 0x1000 -#define GRETH_BD_IE 0x2000 -#define GRETH_BD_LEN 0x7FF - -#define GRETH_TXEN 0x1 -#define GRETH_INT_TX 0x8 -#define GRETH_TXI 0x4 -#define GRETH_TXBD_STATUS 0x0001C000 -#define GRETH_TXBD_MORE 0x20000 -#define GRETH_TXBD_IPCS 0x40000 -#define GRETH_TXBD_TCPCS 0x80000 -#define GRETH_TXBD_UDPCS 0x100000 -#define GRETH_TXBD_ERR_LC 0x10000 -#define GRETH_TXBD_ERR_UE 0x4000 -#define GRETH_TXBD_ERR_AL 0x8000 -#define GRETH_TXBD_NUM 128 -#define GRETH_TXBD_NUM_MASK (GRETH_TXBD_NUM-1) -#define GRETH_TX_BUF_SIZE 2048 - -#define GRETH_INT_RX 0x4 -#define GRETH_RXEN 0x2 -#define GRETH_RXI 0x8 -#define GRETH_RXBD_STATUS 0xFFFFC000 -#define GRETH_RXBD_ERR_AE 0x4000 -#define GRETH_RXBD_ERR_FT 0x8000 -#define GRETH_RXBD_ERR_CRC 0x10000 -#define GRETH_RXBD_ERR_OE 0x20000 -#define GRETH_RXBD_ERR_LE 0x40000 -#define GRETH_RXBD_IP_DEC 0x80000 -#define GRETH_RXBD_IP_CSERR 0x100000 -#define GRETH_RXBD_UDP_DEC 0x200000 -#define GRETH_RXBD_UDP_CSERR 0x400000 -#define GRETH_RXBD_TCP_DEC 0x800000 -#define GRETH_RXBD_TCP_CSERR 0x1000000 - -#define GRETH_RXBD_NUM 128 -#define GRETH_RXBD_NUM_MASK (GRETH_RXBD_NUM-1) -#define GRETH_RX_BUF_SIZE 2048 - -/* Ethernet configuration registers */ -typedef struct _greth_regs { - volatile unsigned int control; - volatile unsigned int status; - volatile unsigned int esa_msb; - volatile unsigned int esa_lsb; - volatile unsigned int mdio; - volatile unsigned int tx_desc_p; - volatile unsigned int rx_desc_p; -} greth_regs; - -/* Ethernet buffer descriptor */ -typedef struct _greth_bd { - volatile unsigned int stat; - unsigned int addr; /* Buffer address not changed by HW */ -} greth_bd; diff --git a/drivers/net/ldpaa_eth/ls2080a.c b/drivers/net/ldpaa_eth/ls2080a.c index 93ed4f18fe..673e428a40 100644 --- a/drivers/net/ldpaa_eth/ls2080a.c +++ b/drivers/net/ldpaa_eth/ls2080a.c @@ -79,3 +79,33 @@ phy_interface_t wriop_dpmac_enet_if(int dpmac_id, int lane_prtcl) return PHY_INTERFACE_MODE_NONE; } + +void wriop_init_dpmac_qsgmii(int sd, int lane_prtcl) +{ + switch (lane_prtcl) { + case QSGMII_A: + wriop_init_dpmac(sd, 5, (int)lane_prtcl); + wriop_init_dpmac(sd, 6, (int)lane_prtcl); + wriop_init_dpmac(sd, 7, (int)lane_prtcl); + wriop_init_dpmac(sd, 8, (int)lane_prtcl); + break; + case QSGMII_B: + wriop_init_dpmac(sd, 1, (int)lane_prtcl); + wriop_init_dpmac(sd, 2, (int)lane_prtcl); + wriop_init_dpmac(sd, 3, (int)lane_prtcl); + wriop_init_dpmac(sd, 4, (int)lane_prtcl); + break; 
+ case QSGMII_C: + wriop_init_dpmac(sd, 13, (int)lane_prtcl); + wriop_init_dpmac(sd, 14, (int)lane_prtcl); + wriop_init_dpmac(sd, 15, (int)lane_prtcl); + wriop_init_dpmac(sd, 16, (int)lane_prtcl); + break; + case QSGMII_D: + wriop_init_dpmac(sd, 9, (int)lane_prtcl); + wriop_init_dpmac(sd, 10, (int)lane_prtcl); + wriop_init_dpmac(sd, 11, (int)lane_prtcl); + wriop_init_dpmac(sd, 12, (int)lane_prtcl); + break; + } +} diff --git a/drivers/net/mvneta.c b/drivers/net/mvneta.c index 674075f037..8881cc77fe 100644 --- a/drivers/net/mvneta.c +++ b/drivers/net/mvneta.c @@ -191,11 +191,16 @@ DECLARE_GLOBAL_DATA_PTR; #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) +#define MVNETA_GMAC_FORCE_LINK_UP (BIT(0) | BIT(1)) +#define MVNETA_GMAC_IB_BYPASS_AN_EN BIT(3) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVNETA_GMAC_AN_SPEED_EN BIT(7) +#define MVNETA_GMAC_SET_FC_EN BIT(8) +#define MVNETA_GMAC_ADVERT_FC_EN BIT(9) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) +#define MVNETA_GMAC_SAMPLE_TX_CFG_EN BIT(15) #define MVNETA_MIB_COUNTERS_BASE 0x3080 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 @@ -404,6 +409,15 @@ static struct buffer_location buffer_loc; */ #define BD_SPACE (1 << 20) +/* + * Dummy implementation that can be overwritten by a board + * specific function + */ +__weak int board_network_enable(struct mii_dev *bus) +{ + return 0; +} + /* Utility/helper methods */ /* Write helper method */ @@ -557,6 +571,13 @@ static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); } +static int mvneta_port_is_fixed_link(struct mvneta_port *pp) +{ + /* phy_addr is set to invalid value for fixed link */ + return pp->phyaddr > PHY_MAX_ADDR; +} + + /* Start the Ethernet port RX and TX activity */ static void mvneta_port_up(struct mvneta_port *pp) { @@ -807,10 +828,12 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Assign port SDMA configuration */ mvreg_write(pp, MVNETA_SDMA_CONFIG, val); - /* Enable PHY polling in hardware for U-Boot */ - val = mvreg_read(pp, MVNETA_UNIT_CONTROL); - val |= MVNETA_PHY_POLLING_ENABLE; - mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + /* Enable PHY polling in hardware if not in fixed-link mode */ + if (!mvneta_port_is_fixed_link(pp)) { + val = mvreg_read(pp, MVNETA_UNIT_CONTROL); + val |= MVNETA_PHY_POLLING_ENABLE; + mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + } mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); @@ -1128,6 +1151,11 @@ static void mvneta_adjust_link(struct udevice *dev) struct phy_device *phydev = pp->phydev; int status_change = 0; + if (mvneta_port_is_fixed_link(pp)) { + debug("Using fixed link, skip link adjust\n"); + return; + } + if (phydev->link) { if ((pp->speed != phydev->speed) || (pp->duplex != phydev->duplex)) { @@ -1498,28 +1526,54 @@ static int mvneta_start(struct udevice *dev) mvneta_port_power_up(pp, pp->phy_interface); if (!pp->init || pp->link == 0) { - /* Set phy address of the port */ - mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr); - phydev = phy_connect(pp->bus, pp->phyaddr, dev, - pp->phy_interface); - - pp->phydev = phydev; - phy_config(phydev); - phy_startup(phydev); - if (!phydev->link) { - printf("%s: No link.\n", phydev->dev->name); - return -1; - } + if (mvneta_port_is_fixed_link(pp)) { + u32 val; - /* Full init on first call */ - mvneta_init(dev); - 
pp->init = 1; - } else { - /* Upon all following calls, this is enough */ - mvneta_port_up(pp); - mvneta_port_enable(pp); + pp->init = 1; + pp->link = 1; + mvneta_init(dev); + + val = MVNETA_GMAC_FORCE_LINK_UP | + MVNETA_GMAC_IB_BYPASS_AN_EN | + MVNETA_GMAC_SET_FC_EN | + MVNETA_GMAC_ADVERT_FC_EN | + MVNETA_GMAC_SAMPLE_TX_CFG_EN; + + if (pp->duplex) + val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (pp->speed == SPEED_1000) + val |= MVNETA_GMAC_CONFIG_GMII_SPEED; + else if (pp->speed == SPEED_100) + val |= MVNETA_GMAC_CONFIG_MII_SPEED; + + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); + } else { + /* Set phy address of the port */ + mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr); + + phydev = phy_connect(pp->bus, pp->phyaddr, dev, + pp->phy_interface); + + pp->phydev = phydev; + phy_config(phydev); + phy_startup(phydev); + if (!phydev->link) { + printf("%s: No link.\n", phydev->dev->name); + return -1; + } + + /* Full init on first call */ + mvneta_init(dev); + pp->init = 1; + return 0; + } } + /* Upon all following calls, this is enough */ + mvneta_port_up(pp); + mvneta_port_enable(pp); + return 0; } @@ -1615,6 +1669,8 @@ static int mvneta_probe(struct udevice *dev) struct mii_dev *bus; unsigned long addr; void *bd_space; + int ret; + int fl_node; /* * Allocate buffer area for descs and rx_buffers. This is only @@ -1647,10 +1703,19 @@ static int mvneta_probe(struct udevice *dev) /* PHY interface is already decoded in mvneta_ofdata_to_platdata() */ pp->phy_interface = pdata->phy_interface; - /* Now read phyaddr from DT */ - addr = fdtdec_get_int(blob, node, "phy", 0); - addr = fdt_node_offset_by_phandle(blob, addr); - pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0); + /* fetch 'fixed-link' property from 'neta' node */ + fl_node = fdt_subnode_offset(blob, node, "fixed-link"); + if (fl_node != -FDT_ERR_NOTFOUND) { + /* set phy_addr to invalid value for fixed link */ + pp->phyaddr = PHY_MAX_ADDR + 1; + pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex"); + pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0); + } else { + /* Now read phyaddr from DT */ + addr = fdtdec_get_int(blob, node, "phy", 0); + addr = fdt_node_offset_by_phandle(blob, addr); + pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0); + } bus = mdio_alloc(); if (!bus) { @@ -1664,7 +1729,11 @@ static int mvneta_probe(struct udevice *dev) bus->priv = (void *)pp; pp->bus = bus; - return mdio_register(bus); + ret = mdio_register(bus); + if (ret) + return ret; + + return board_network_enable(bus); } static void mvneta_stop(struct udevice *dev) diff --git a/drivers/net/mvpp2.c b/drivers/net/mvpp2.c index 88e88b903b..8ffe6c84d4 100644 --- a/drivers/net/mvpp2.c +++ b/drivers/net/mvpp2.c @@ -6,7 +6,7 @@ * Marcin Wojtas <mw@semihalf.com> * * U-Boot version: - * Copyright (C) 2016 Stefan Roese <sr@denx.de> + * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de> * * This file is licensed under the terms of the GNU General Public * License version 2. 
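The mvneta.c hunks above add fixed-link support: when the ethernet node carries a fixed-link subnode, the PHY address is parked above PHY_MAX_ADDR, PHY polling and link adjustment are skipped, and MVNETA_GMAC_AUTONEG_CONFIG is forced from the speed and full-duplex values read out of the device tree. A standalone condensation of that register-value logic follows; the macro names are shortened and plain integers replace SPEED_1000/SPEED_100, so this is an illustration of the hunk, not U-Boot code:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
/* Bit values copied from the mvneta.c hunk above */
#define GMAC_FORCE_LINK_UP	(BIT(0) | BIT(1))
#define GMAC_IB_BYPASS_AN_EN	BIT(3)
#define GMAC_CONFIG_MII_SPEED	BIT(5)
#define GMAC_CONFIG_GMII_SPEED	BIT(6)
#define GMAC_SET_FC_EN		BIT(8)
#define GMAC_ADVERT_FC_EN	BIT(9)
#define GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define GMAC_SAMPLE_TX_CFG_EN	BIT(15)

/*
 * Assemble the MVNETA_GMAC_AUTONEG_CONFIG value the driver writes for a
 * fixed link, from the speed/duplex taken out of the fixed-link DT node.
 */
static uint32_t fixed_link_autoneg_val(int speed, int full_duplex)
{
	uint32_t val = GMAC_FORCE_LINK_UP | GMAC_IB_BYPASS_AN_EN |
		       GMAC_SET_FC_EN | GMAC_ADVERT_FC_EN |
		       GMAC_SAMPLE_TX_CFG_EN;

	if (full_duplex)
		val |= GMAC_CONFIG_FULL_DUPLEX;
	if (speed == 1000)
		val |= GMAC_CONFIG_GMII_SPEED;
	else if (speed == 100)
		val |= GMAC_CONFIG_MII_SPEED;

	return val;
}

int main(void)
{
	printf("1000/full -> 0x%04x\n", fixed_link_autoneg_val(1000, 1));
	return 0;
}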
This program is licensed "as is" without any @@ -91,9 +91,11 @@ do { \ #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) #define MVPP2_RXQ_POOL_SHORT_OFFS 20 -#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 #define MVPP2_RXQ_POOL_LONG_OFFS 24 -#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 #define MVPP2_RXQ_DISABLE_MASK BIT(31) @@ -141,6 +143,7 @@ do { \ /* Descriptor Manager Top Registers */ #define MVPP2_RXQ_NUM_REG 0x2040 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 +#define MVPP22_DESC_ADDR_OFFS 8 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) @@ -182,6 +185,7 @@ do { \ #define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) +#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) @@ -194,9 +198,51 @@ do { \ #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) #define MVPP2_BASE_ADDR_ENABLE 0x4060 +/* AXI Bridge Registers */ +#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 +#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 +#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 +#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 +#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 +#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c +#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 +#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 +#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 +#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 +#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 +#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 + +/* Values for AXI Bridge registers */ +#define MVPP22_AXI_ATTR_CACHE_OFFS 0 +#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 + +#define MVPP22_AXI_CODE_CACHE_OFFS 0 +#define MVPP22_AXI_CODE_DOMAIN_OFFS 4 + +#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 +#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 +#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb + +#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 +#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 + /* Interrupt Cause and Mask registers */ #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) -#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) +#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) + +#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 + +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 + +#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 +#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 + #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) @@ -251,14 +297,23 @@ do { \ #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) #define MVPP2_BM_VIRT_ALLOC_REG 0x6440 +#define 
MVPP2_BM_ADDR_HIGH_ALLOC 0x6444 +#define MVPP2_BM_ADDR_HIGH_PHYS_MASK 0xff +#define MVPP2_BM_ADDR_HIGH_VIRT_MASK 0xff00 +#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT 8 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) #define MVPP2_BM_VIRT_RLS_REG 0x64c0 -#define MVPP2_BM_MC_RLS_REG 0x64c4 +#define MVPP21_BM_MC_RLS_REG 0x64c4 #define MVPP2_BM_MC_ID_MASK 0xfff #define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) +#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 +#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 +#define MVPP22_BM_MC_RLS_REG 0x64d4 /* TX Scheduler registers */ #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 @@ -294,16 +349,13 @@ do { \ #define MVPP2_SRC_ADDR_HIGH 0x28 #define MVPP2_PHY_AN_CFG0_REG 0x34 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) -#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \ - 0x400 + (port) * 0x400) -#define MVPP2_MIB_LATE_COLLISION 0x7c -#define MVPP2_ISR_SUM_MASK_REG 0x220c #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c -#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 +#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 /* Per-port registers */ #define MVPP2_GMAC_CTRL_0_REG 0x0 #define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) @@ -315,23 +367,131 @@ do { \ #define MVPP2_GMAC_SA_LOW_OFFS 7 #define MVPP2_GMAC_CTRL_2_REG 0x8 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_SGMII_MODE_MASK BIT(0) #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) +#define MVPP2_GMAC_PORT_DIS_PADING_MASK BIT(5) #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK BIT(9) #define MVPP2_GMAC_AUTONEG_CONFIG 0xc #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define MVPP2_GMAC_EN_PCS_AN BIT(2) +#define MVPP2_GMAC_AN_BYPASS_EN BIT(3) #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVPP2_GMAC_AN_SPEED_EN BIT(7) #define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_EN_FC_AN BIT(11) #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG BIT(15) #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) +#define MVPP2_GMAC_CTRL_4_REG 0x90 +#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK BIT(0) +#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK BIT(5) +#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK BIT(6) +#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK BIT(7) + +/* + * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, + * relative to port->base. 
+ */ + +/* Port Mac Control0 */ +#define MVPP22_XLG_CTRL0_REG 0x100 +#define MVPP22_XLG_PORT_EN BIT(0) +#define MVPP22_XLG_MAC_RESETN BIT(1) +#define MVPP22_XLG_RX_FC_EN BIT(7) +#define MVPP22_XLG_MIBCNT_DIS BIT(13) +/* Port Mac Control1 */ +#define MVPP22_XLG_CTRL1_REG 0x104 +#define MVPP22_XLG_MAX_RX_SIZE_OFFS 0 +#define MVPP22_XLG_MAX_RX_SIZE_MASK 0x1fff +/* Port Interrupt Mask */ +#define MVPP22_XLG_INTERRUPT_MASK_REG 0x118 +#define MVPP22_XLG_INTERRUPT_LINK_CHANGE BIT(1) +/* Port Mac Control3 */ +#define MVPP22_XLG_CTRL3_REG 0x11c +#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC (1 << 13) +/* Port Mac Control4 */ +#define MVPP22_XLG_CTRL4_REG 0x184 +#define MVPP22_XLG_FORWARD_802_3X_FC_EN BIT(5) +#define MVPP22_XLG_FORWARD_PFC_EN BIT(6) +#define MVPP22_XLG_MODE_DMA_1G BIT(12) +#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK BIT(14) + +/* XPCS registers */ + +/* Global Configuration 0 */ +#define MVPP22_XPCS_GLOBAL_CFG_0_REG 0x0 +#define MVPP22_XPCS_PCSRESET BIT(0) +#define MVPP22_XPCS_PCSMODE_OFFS 3 +#define MVPP22_XPCS_PCSMODE_MASK (0x3 << \ + MVPP22_XPCS_PCSMODE_OFFS) +#define MVPP22_XPCS_LANEACTIVE_OFFS 5 +#define MVPP22_XPCS_LANEACTIVE_MASK (0x3 << \ + MVPP22_XPCS_LANEACTIVE_OFFS) + +/* MPCS registers */ + +#define PCS40G_COMMON_CONTROL 0x14 +#define FORWARD_ERROR_CORRECTION_MASK BIT(1) + +#define PCS_CLOCK_RESET 0x14c +#define TX_SD_CLK_RESET_MASK BIT(0) +#define RX_SD_CLK_RESET_MASK BIT(1) +#define MAC_CLK_RESET_MASK BIT(2) +#define CLK_DIVISION_RATIO_OFFS 4 +#define CLK_DIVISION_RATIO_MASK (0x7 << CLK_DIVISION_RATIO_OFFS) +#define CLK_DIV_PHASE_SET_MASK BIT(11) + +/* System Soft Reset 1 */ +#define GOP_SOFT_RESET_1_REG 0x108 +#define NETC_GOP_SOFT_RESET_OFFS 6 +#define NETC_GOP_SOFT_RESET_MASK (0x1 << \ + NETC_GOP_SOFT_RESET_OFFS) + +/* Ports Control 0 */ +#define NETCOMP_PORTS_CONTROL_0_REG 0x110 +#define NETC_BUS_WIDTH_SELECT_OFFS 1 +#define NETC_BUS_WIDTH_SELECT_MASK (0x1 << \ + NETC_BUS_WIDTH_SELECT_OFFS) +#define NETC_GIG_RX_DATA_SAMPLE_OFFS 29 +#define NETC_GIG_RX_DATA_SAMPLE_MASK (0x1 << \ + NETC_GIG_RX_DATA_SAMPLE_OFFS) +#define NETC_CLK_DIV_PHASE_OFFS 31 +#define NETC_CLK_DIV_PHASE_MASK (0x1 << NETC_CLK_DIV_PHASE_OFFS) +/* Ports Control 1 */ +#define NETCOMP_PORTS_CONTROL_1_REG 0x114 +#define NETC_PORTS_ACTIVE_OFFSET(p) (0 + p) +#define NETC_PORTS_ACTIVE_MASK(p) (0x1 << \ + NETC_PORTS_ACTIVE_OFFSET(p)) +#define NETC_PORT_GIG_RF_RESET_OFFS(p) (28 + p) +#define NETC_PORT_GIG_RF_RESET_MASK(p) (0x1 << \ + NETC_PORT_GIG_RF_RESET_OFFS(p)) +#define NETCOMP_CONTROL_0_REG 0x120 +#define NETC_GBE_PORT0_SGMII_MODE_OFFS 0 +#define NETC_GBE_PORT0_SGMII_MODE_MASK (0x1 << \ + NETC_GBE_PORT0_SGMII_MODE_OFFS) +#define NETC_GBE_PORT1_SGMII_MODE_OFFS 1 +#define NETC_GBE_PORT1_SGMII_MODE_MASK (0x1 << \ + NETC_GBE_PORT1_SGMII_MODE_OFFS) +#define NETC_GBE_PORT1_MII_MODE_OFFS 2 +#define NETC_GBE_PORT1_MII_MODE_MASK (0x1 << \ + NETC_GBE_PORT1_MII_MODE_OFFS) + +#define MVPP22_SMI_MISC_CFG_REG (MVPP22_SMI + 0x04) +#define MVPP22_SMI_POLLING_EN BIT(10) + +#define MVPP22_SMI_PHY_ADDR_REG(port) (MVPP22_SMI + 0x04 + \ + (0x4 * (port))) #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff @@ -340,7 +500,9 @@ do { \ (((index) < (q)->last_desc) ? 
((index) + 1) : 0) /* SMI: 0xc0054 -> offset 0x54 to lms_base */ -#define MVPP2_SMI 0x0054 +#define MVPP21_SMI 0x0054 +/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */ +#define MVPP22_SMI 0x1200 #define MVPP2_PHY_REG_MASK 0x1f /* SMI register fields */ #define MVPP2_SMI_DATA_OFFS 0 /* Data */ @@ -355,6 +517,48 @@ do { \ #define MVPP2_PHY_ADDR_MASK 0x1f #define MVPP2_PHY_REG_MASK 0x1f +/* Additional PPv2.2 offsets */ +#define MVPP22_MPCS 0x007000 +#define MVPP22_XPCS 0x007400 +#define MVPP22_PORT_BASE 0x007e00 +#define MVPP22_PORT_OFFSET 0x001000 +#define MVPP22_RFU1 0x318000 + +/* Maximum number of ports */ +#define MVPP22_GOP_MAC_NUM 4 + +/* Sets the field located at the specified in data */ +#define MVPP2_RGMII_TX_FIFO_MIN_TH 0x41 +#define MVPP2_SGMII_TX_FIFO_MIN_TH 0x5 +#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH 0xb + +/* Net Complex */ +enum mv_netc_topology { + MV_NETC_GE_MAC2_SGMII = BIT(0), + MV_NETC_GE_MAC3_SGMII = BIT(1), + MV_NETC_GE_MAC3_RGMII = BIT(2), +}; + +enum mv_netc_phase { + MV_NETC_FIRST_PHASE, + MV_NETC_SECOND_PHASE, +}; + +enum mv_netc_sgmii_xmi_mode { + MV_NETC_GBE_SGMII, + MV_NETC_GBE_XMII, +}; + +enum mv_netc_mii_mode { + MV_NETC_GBE_RGMII, + MV_NETC_GBE_MII, +}; + +enum mv_netc_lanes { + MV_NETC_LANE_23, + MV_NETC_LANE_45, +}; + /* Various constants */ /* Coalescing */ @@ -397,9 +601,6 @@ do { \ /* Maximum number of TXQs used by single port */ #define MVPP2_MAX_TXQ 8 -/* Maximum number of RXQs used by single port */ -#define MVPP2_MAX_RXQ 8 - /* Default number of TXQs in use */ #define MVPP2_DEFAULT_TXQ 1 @@ -407,9 +608,6 @@ do { \ #define MVPP2_DEFAULT_RXQ 1 #define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */ -/* Total number of RXQs available to all ports */ -#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ) - /* Max number of Rx descriptors */ #define MVPP2_MAX_RXD 16 @@ -429,9 +627,23 @@ do { \ #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) /* RX FIFO constants */ -#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80 -#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 +#define MVPP21_RX_FIFO_PORT_DATA_SIZE 0x2000 +#define MVPP21_RX_FIFO_PORT_ATTR_SIZE 0x80 +#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE 0x8000 +#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE 0x2000 +#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE 0x1000 +#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE 0x200 +#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE 0x80 +#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE 0x40 +#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 + +/* TX general registers */ +#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port) (0x8860 + ((eth_tx_port) << 2)) +#define MVPP22_TX_FIFO_SIZE_MASK 0xf + +/* TX FIFO constants */ +#define MVPP2_TX_FIFO_DATA_SIZE_10KB 0xa +#define MVPP2_TX_FIFO_DATA_SIZE_3KB 0x3 /* RX buffer constants */ #define MVPP2_SKB_SHINFO_SIZE \ @@ -576,28 +788,28 @@ enum mvpp2_tag_type { /* Sram result info bits assignment */ #define MVPP2_PRS_RI_MAC_ME_MASK 0x1 #define MVPP2_PRS_RI_DSA_MASK 0x2 -#define MVPP2_PRS_RI_VLAN_MASK 0xc -#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_NONE 0x0 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) -#define MVPP2_PRS_RI_L2_CAST_MASK 0x600 -#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_UCAST 0x0 
#define MVPP2_PRS_RI_L2_MCAST BIT(9) #define MVPP2_PRS_RI_L2_BCAST BIT(10) #define MVPP2_PRS_RI_PPPOE_MASK 0x800 -#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000 -#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_UN 0x0 #define MVPP2_PRS_RI_L3_IP4 BIT(12) #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) #define MVPP2_PRS_RI_L3_IP6 BIT(14) #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000 -#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_UCAST 0x0 #define MVPP2_PRS_RI_L3_MCAST BIT(15) #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 @@ -693,6 +905,14 @@ struct mvpp2 { /* Shared registers' base addresses */ void __iomem *base; void __iomem *lms_base; + void __iomem *iface_base; + void __iomem *mdio_base; + + void __iomem *mpcs_base; + void __iomem *xpcs_base; + void __iomem *rfu1_base; + + u32 netc_config; /* List of pointers to port structures */ struct mvpp2_port **port_list; @@ -711,7 +931,15 @@ struct mvpp2 { /* Tclk value */ u32 tclk; + /* HW version */ + enum { MVPP21, MVPP22 } hw_version; + + /* Maximum number of RXQs per port */ + unsigned int max_port_rxqs; + struct mii_dev *bus; + + int probe_done; }; struct mvpp2_pcpu_stats { @@ -724,6 +952,11 @@ struct mvpp2_pcpu_stats { struct mvpp2_port { u8 id; + /* Index of the port from the "group of ports" complex point + * of view + */ + int gop_id; + int irq; struct mvpp2 *priv; @@ -757,6 +990,8 @@ struct mvpp2_port { unsigned int duplex; unsigned int speed; + unsigned int phy_speed; /* SGMII 1Gbps vs 2.5Gbps */ + struct mvpp2_bm_pool *pool_long; struct mvpp2_bm_pool *pool_short; @@ -798,22 +1033,24 @@ struct mvpp2_port { #define MVPP2_RXD_L3_IP6 BIT(30) #define MVPP2_RXD_BUF_HDR BIT(31) -struct mvpp2_tx_desc { +/* HW TX descriptor for PPv2.1 */ +struct mvpp21_tx_desc { u32 command; /* Options used by HW for packet transmitting.*/ u8 packet_offset; /* the offset from the buffer beginning */ u8 phys_txq; /* destination queue ID */ u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_phys_addr; /* physical addr of transmitted buffer */ + u32 buf_dma_addr; /* physical addr of transmitted buffer */ u32 buf_cookie; /* cookie for access to TX buffer in tx path */ u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ u32 reserved2; /* reserved (for future use) */ }; -struct mvpp2_rx_desc { +/* HW RX descriptor for PPv2.1 */ +struct mvpp21_rx_desc { u32 status; /* info about received packet */ u16 reserved1; /* parser_info (for future use, PnC) */ u16 data_size; /* size of received packet in bytes */ - u32 buf_phys_addr; /* physical address of the buffer */ + u32 buf_dma_addr; /* physical address of the buffer */ u32 buf_cookie; /* cookie for access to RX buffer in rx path */ u16 reserved2; /* gem_port_id (for future use, PON) */ u16 reserved3; /* csum_l4 (for future use, PnC) */ @@ -824,6 +1061,45 @@ struct mvpp2_rx_desc { u32 reserved8; }; +/* HW TX descriptor for PPv2.2 */ +struct mvpp22_tx_desc { + u32 command; + u8 packet_offset; + u8 phys_txq; + u16 data_size; + u64 reserved1; + u64 buf_dma_addr_ptp; + u64 buf_cookie_misc; +}; + +/* HW RX descriptor for PPv2.2 */ +struct mvpp22_rx_desc { + u32 status; + u16 reserved1; + u16 data_size; + u32 reserved2; + u32 
reserved3; + u64 buf_dma_addr_key_hash; + u64 buf_cookie_misc; +}; + +/* Opaque type used by the driver to manipulate the HW TX and RX + * descriptors + */ +struct mvpp2_tx_desc { + union { + struct mvpp21_tx_desc pp21; + struct mvpp22_tx_desc pp22; + }; +}; + +struct mvpp2_rx_desc { + union { + struct mvpp21_rx_desc pp21; + struct mvpp22_rx_desc pp22; + }; +}; + /* Per-CPU Tx queue control */ struct mvpp2_txq_pcpu { int cpu; @@ -868,7 +1144,7 @@ struct mvpp2_tx_queue { struct mvpp2_tx_desc *descs; /* DMA address of the Tx DMA descriptors array */ - dma_addr_t descs_phys; + dma_addr_t descs_dma; /* Index of the last Tx DMA descriptor */ int last_desc; @@ -891,7 +1167,7 @@ struct mvpp2_rx_queue { struct mvpp2_rx_desc *descs; /* DMA address of the RX DMA descriptors array */ - dma_addr_t descs_phys; + dma_addr_t descs_dma; /* Index of the last RX DMA descriptor */ int last_desc; @@ -963,33 +1239,14 @@ struct mvpp2_bm_pool { int pkt_size; /* BPPE virtual base address */ - u32 *virt_addr; - /* BPPE physical base address */ - dma_addr_t phys_addr; + unsigned long *virt_addr; + /* BPPE DMA base address */ + dma_addr_t dma_addr; /* Ports using BM pool */ u32 port_map; - - /* Occupied buffers indicator */ - int in_use_thresh; }; -struct mvpp2_buff_hdr { - u32 next_buff_phys_addr; - u32 next_buff_virt_addr; - u16 byte_count; - u16 info; - u8 reserved1; /* bm_qset (for future use, BM) */ -}; - -/* Buffer header info bits */ -#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff -#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK) -#define MVPP2_B_HDR_INFO_LAST_OFFS 12 -#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12) -#define MVPP2_B_HDR_INFO_IS_LAST(info) \ - ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS) - /* Static declaractions */ /* Number of RXQs used by single port */ @@ -997,6 +1254,8 @@ static int rxq_number = MVPP2_DEFAULT_RXQ; /* Number of TXQs used by single port */ static int txq_number = MVPP2_DEFAULT_TXQ; +static int base_id; + #define MVPP2_DRIVER_NAME "mvpp2" #define MVPP2_DRIVER_VERSION "1.0" @@ -1007,8 +1266,8 @@ struct buffer_location { struct mvpp2_tx_desc *aggr_tx_descs; struct mvpp2_tx_desc *tx_descs; struct mvpp2_rx_desc *rx_descs; - u32 *bm_pool[MVPP2_BM_POOLS_NUM]; - u32 *rx_buffer[MVPP2_BM_LONG_BUF_NUM]; + unsigned long *bm_pool[MVPP2_BM_POOLS_NUM]; + unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM]; int first_rxq; }; @@ -1036,6 +1295,96 @@ static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) return readl(priv->base + offset); } +static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + dma_addr_t dma_addr) +{ + if (port->priv->hw_version == MVPP21) { + tx_desc->pp21.buf_dma_addr = dma_addr; + } else { + u64 val = (u64)dma_addr; + + tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); + tx_desc->pp22.buf_dma_addr_ptp |= val; + } +} + +static void mvpp2_txdesc_size_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + size_t size) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.data_size = size; + else + tx_desc->pp22.data_size = size; +} + +static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int txq) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.phys_txq = txq; + else + tx_desc->pp22.phys_txq = txq; +} + +static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int command) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.command = command; + else + 
tx_desc->pp22.command = command; +} + +static void mvpp2_txdesc_offset_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int offset) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.packet_offset = offset; + else + tx_desc->pp22.packet_offset = offset; +} + +static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.buf_dma_addr; + else + return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); +} + +static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.buf_cookie; + else + return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); +} + +static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.data_size; + else + return rx_desc->pp22.data_size; +} + +static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.status; + else + return rx_desc->pp22.status; +} + static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) { txq_pcpu->txq_get_index++; @@ -2218,19 +2567,26 @@ static int mvpp2_bm_pool_create(struct udevice *dev, { u32 val; + /* Number of buffer pointers must be a multiple of 16, as per + * hardware constraints + */ + if (!IS_ALIGNED(size, 16)) + return -EINVAL; + bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id]; - bm_pool->phys_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id]; + bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id]; if (!bm_pool->virt_addr) return -ENOMEM; - if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) { + if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, + MVPP2_BM_POOL_PTR_ALIGN)) { dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); return -ENOMEM; } mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), - bm_pool->phys_addr); + lower_32_bits(bm_pool->dma_addr)); mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); @@ -2337,17 +2693,20 @@ static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv) static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int long_pool) { - u32 val; + u32 val, mask; int prxq; /* Get queue physical ID */ prxq = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~MVPP2_RXQ_POOL_LONG_MASK; - val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & - MVPP2_RXQ_POOL_LONG_MASK); + if (port->priv->hw_version == MVPP21) + mask = MVPP21_RXQ_POOL_LONG_MASK; + else + mask = MVPP22_RXQ_POOL_LONG_MASK; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } @@ -2363,26 +2722,48 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) } /* Get pool number from a BM cookie */ -static inline int mvpp2_bm_cookie_pool_get(u32 cookie) +static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) { return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; } /* Release buffer to BM */ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, - u32 buf_phys_addr, u32 buf_virt_addr) + dma_addr_t buf_dma_addr, + unsigned long 
buf_phys_addr) { - mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr); - mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr); + if (port->priv->hw_version == MVPP22) { + u32 val = 0; + + if (sizeof(dma_addr_t) == 8) + val |= upper_32_bits(buf_dma_addr) & + MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; + + if (sizeof(phys_addr_t) == 8) + val |= (upper_32_bits(buf_phys_addr) + << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & + MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; + + mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val); + } + + /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply + * returned in the "cookie" field of the RX + * descriptor. Instead of storing the virtual address, we + * store the physical address + */ + mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); + mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); } /* Refill BM pool */ static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, - u32 phys_addr, u32 cookie) + dma_addr_t dma_addr, + phys_addr_t phys_addr) { int pool = mvpp2_bm_cookie_pool_get(bm); - mvpp2_bm_pool_put(port, pool, phys_addr, cookie); + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); } /* Allocate buffers for the pool */ @@ -2390,7 +2771,6 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, int buf_num) { int i; - u32 bm; if (buf_num < 0 || (buf_num + bm_pool->buf_num > bm_pool->size)) { @@ -2400,15 +2780,15 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, return 0; } - bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id); for (i = 0; i < buf_num; i++) { - mvpp2_pool_refill(port, bm, (u32)buffer_loc.rx_buffer[i], - (u32)buffer_loc.rx_buffer[i]); + mvpp2_bm_pool_put(port, bm_pool->id, + (dma_addr_t)buffer_loc.rx_buffer[i], + (unsigned long)buffer_loc.rx_buffer[i]); + } /* Update BM driver with number of buffers added to pool */ bm_pool->buf_num += i; - bm_pool->in_use_thresh = bm_pool->buf_num / 4; return i; } @@ -2502,6 +2882,7 @@ static void mvpp2_port_mii_set(struct mvpp2_port *port) val |= MVPP2_GMAC_INBAND_AN_MASK; break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: val |= MVPP2_GMAC_PORT_RGMII_MASK; default: val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; @@ -2593,22 +2974,749 @@ static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); } -/* Set defaults to the MVPP2 port */ -static void mvpp2_defaults_set(struct mvpp2_port *port) +/* PPv2.2 GoP/GMAC config */ + +/* Set the MAC to reset or exit from reset */ +static int gop_gmac_reset(struct mvpp2_port *port, int reset) { - int tx_port_num, val, queue, ptxq, lrxq; + u32 val; + + /* read - modify - write */ + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + if (reset) + val |= MVPP2_GMAC_PORT_RESET_MASK; + else + val &= ~MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + return 0; +} + +/* + * gop_gpcs_mode_cfg + * + * Configure port to working with Gig PCS or don't. 
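 *
 * Concretely, this sets or clears MVPP2_GMAC_PCS_ENABLE_MASK in
 * MVPP2_GMAC_CTRL_2_REG for the port: gop_port_init() enables the PCS
 * for the SGMII case and disables it for the RGMII cases.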
+ */ +static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + if (en) + val |= MVPP2_GMAC_PCS_ENABLE_MASK; + else + val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; + /* enable / disable PCS on this port */ + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + return 0; +} + +static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + if (en) + val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK; + else + val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK; + /* enable / disable PCS on this port */ + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + return 0; +} + +static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port) +{ + u32 val, thresh; + + /* + * Configure minimal level of the Tx FIFO before the lower part + * starts to read a packet + */ + thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH; + val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh); + writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + + /* Disable bypass of sync module */ + val = readl(port->base + MVPP2_GMAC_CTRL_4_REG); + val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK; + /* configure DP clock select according to mode */ + val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK; + /* configure QSGMII bypass according to mode */ + val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_4_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val |= MVPP2_GMAC_PORT_DIS_PADING_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + /* + * Configure GIG MAC to 1000Base-X mode connected to a fiber + * transceiver + */ + val |= MVPP2_GMAC_PORT_TYPE_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + + /* configure AN 0x9268 */ + val = MVPP2_GMAC_EN_PCS_AN | + MVPP2_GMAC_AN_BYPASS_EN | + MVPP2_GMAC_CONFIG_MII_SPEED | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_FC_ADV_EN | + MVPP2_GMAC_CONFIG_FULL_DUPLEX | + MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} - /* Configure port to loopback if needed */ - if (port->flags & MVPP2_F_LOOPBACK) - mvpp2_port_loopback_set(port); +static void gop_gmac_sgmii_cfg(struct mvpp2_port *port) +{ + u32 val, thresh; - /* Update TX FIFO MIN Threshold */ + /* + * Configure minimal level of the Tx FIFO before the lower part + * starts to read a packet + */ + thresh = MVPP2_SGMII_TX_FIFO_MIN_TH; val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; - /* Min. 
TX threshold must be less than minimal packet length */ - val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh); writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + /* Disable bypass of sync module */ + val = readl(port->base + MVPP2_GMAC_CTRL_4_REG); + val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK; + /* configure DP clock select according to mode */ + val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK; + /* configure QSGMII bypass according to mode */ + val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_4_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val |= MVPP2_GMAC_PORT_DIS_PADING_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + /* configure GIG MAC to SGMII mode */ + val &= ~MVPP2_GMAC_PORT_TYPE_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + + /* configure AN */ + val = MVPP2_GMAC_EN_PCS_AN | + MVPP2_GMAC_AN_BYPASS_EN | + MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_EN_FC_AN | + MVPP2_GMAC_AN_DUPLEX_EN | + MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void gop_gmac_rgmii_cfg(struct mvpp2_port *port) +{ + u32 val, thresh; + + /* + * Configure minimal level of the Tx FIFO before the lower part + * starts to read a packet + */ + thresh = MVPP2_RGMII_TX_FIFO_MIN_TH; + val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh); + writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + + /* Disable bypass of sync module */ + val = readl(port->base + MVPP2_GMAC_CTRL_4_REG); + val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK; + /* configure DP clock select according to mode */ + val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK; + val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; + val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_4_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val &= ~MVPP2_GMAC_PORT_DIS_PADING_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + /* configure GIG MAC to SGMII mode */ + val &= ~MVPP2_GMAC_PORT_TYPE_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + + /* configure AN 0xb8e8 */ + val = MVPP2_GMAC_AN_BYPASS_EN | + MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_EN_FC_AN | + MVPP2_GMAC_AN_DUPLEX_EN | + MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +/* Set the internal mux's to the required MAC in the GOP */ +static int gop_gmac_mode_cfg(struct mvpp2_port *port) +{ + u32 val; + + /* Set TX FIFO thresholds */ + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + if (port->phy_speed == 2500) + gop_gmac_sgmii2_5_cfg(port); + else + gop_gmac_sgmii_cfg(port); + break; + + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + gop_gmac_rgmii_cfg(port); + break; + + default: + return -1; + } + + /* Jumbo frame support - 0x1400*2= 0x2800 bytes */ + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; + val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + + /* PeriodicXonEn disable */ + val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); + + return 0; +} + +static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port) +{ + 
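	/*
	 * MAC0 is the only GoP MAC handled here (it pairs XLG0 with
	 * GMAC0): in the SGMII/1G case gop_port_init() uses this helper
	 * to mux MAC0 onto the GMAC, while the SFI case selects the 10G
	 * MAC through gop_xlg_mac_mode_cfg() instead.
	 */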
u32 val; + + /* relevant only for MAC0 (XLG0 and GMAC0) */ + if (port->gop_id > 0) + return; + + /* configure 1Gig MAC mode */ + val = readl(port->base + MVPP22_XLG_CTRL3_REG); + val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; + writel(val, port->base + MVPP22_XLG_CTRL3_REG); +} + +static int gop_gpcs_reset(struct mvpp2_port *port, int reset) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + if (reset) + val &= ~MVPP2_GMAC_SGMII_MODE_MASK; + else + val |= MVPP2_GMAC_SGMII_MODE_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + return 0; +} + +/* Set the internal mux's to the required PCS in the PI */ +static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes) +{ + u32 val; + int lane; + + switch (num_of_lanes) { + case 1: + lane = 0; + break; + case 2: + lane = 1; + break; + case 4: + lane = 2; + break; + default: + return -1; + } + + /* configure XG MAC mode */ + val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); + val &= ~MVPP22_XPCS_PCSMODE_OFFS; + val &= ~MVPP22_XPCS_LANEACTIVE_MASK; + val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS; + writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); + + return 0; +} + +static int gop_mpcs_mode(struct mvpp2_port *port) +{ + u32 val; + + /* configure PCS40G COMMON CONTROL */ + val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL); + val &= ~FORWARD_ERROR_CORRECTION_MASK; + writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL); + + /* configure PCS CLOCK RESET */ + val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET); + val &= ~CLK_DIVISION_RATIO_MASK; + val |= 1 << CLK_DIVISION_RATIO_OFFS; + writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET); + + val &= ~CLK_DIV_PHASE_SET_MASK; + val |= MAC_CLK_RESET_MASK; + val |= RX_SD_CLK_RESET_MASK; + val |= TX_SD_CLK_RESET_MASK; + writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET); + + return 0; +} + +/* Set the internal mux's to the required MAC in the GOP */ +static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes) +{ + u32 val; + + /* configure 10G MAC mode */ + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val |= MVPP22_XLG_RX_FC_EN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + + val = readl(port->base + MVPP22_XLG_CTRL3_REG); + val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC; + writel(val, port->base + MVPP22_XLG_CTRL3_REG); + + /* read - modify - write */ + val = readl(port->base + MVPP22_XLG_CTRL4_REG); + val &= ~MVPP22_XLG_MODE_DMA_1G; + val |= MVPP22_XLG_FORWARD_PFC_EN; + val |= MVPP22_XLG_FORWARD_802_3X_FC_EN; + val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK; + writel(val, port->base + MVPP22_XLG_CTRL4_REG); + + /* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */ + val = readl(port->base + MVPP22_XLG_CTRL1_REG); + val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK; + val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS; + writel(val, port->base + MVPP22_XLG_CTRL1_REG); + + /* unmask link change interrupt */ + val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG); + val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE; + val |= 1; /* unmask summary bit */ + writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG); + + return 0; +} + +/* Set PCS to reset or exit from reset */ +static int gop_xpcs_reset(struct mvpp2_port *port, int reset) +{ + u32 val; + + /* read - modify - write */ + val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); + if (reset) + val &= ~MVPP22_XPCS_PCSRESET; + else + val |= MVPP22_XPCS_PCSRESET; + 
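	/*
	 * Note that MVPP22_XPCS_PCSRESET behaves as an active-low reset
	 * here: clearing it (reset == 1) holds the XPCS in reset, while
	 * setting it releases the XPCS, mirroring MVPP22_XLG_MAC_RESETN
	 * in gop_xlg_mac_reset() below.
	 */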
writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); + + return 0; +} + +/* Set the MAC to reset or exit from reset */ +static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset) +{ + u32 val; + + /* read - modify - write */ + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + if (reset) + val &= ~MVPP22_XLG_MAC_RESETN; + else + val |= MVPP22_XLG_MAC_RESETN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + + return 0; +} + +/* + * gop_port_init + * + * Init physical port. Configures the port mode and all it's elements + * accordingly. + * Does not verify that the selected mode/port number is valid at the + * core level. + */ +static int gop_port_init(struct mvpp2_port *port) +{ + int mac_num = port->gop_id; + int num_of_act_lanes; + + if (mac_num >= MVPP22_GOP_MAC_NUM) { + netdev_err(NULL, "%s: illegal port number %d", __func__, + mac_num); + return -1; + } + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + gop_gmac_reset(port, 1); + + /* configure PCS */ + gop_gpcs_mode_cfg(port, 0); + gop_bypass_clk_cfg(port, 1); + + /* configure MAC */ + gop_gmac_mode_cfg(port); + /* pcs unreset */ + gop_gpcs_reset(port, 0); + + /* mac unreset */ + gop_gmac_reset(port, 0); + break; + + case PHY_INTERFACE_MODE_SGMII: + /* configure PCS */ + gop_gpcs_mode_cfg(port, 1); + + /* configure MAC */ + gop_gmac_mode_cfg(port); + /* select proper Mac mode */ + gop_xlg_2_gig_mac_cfg(port); + + /* pcs unreset */ + gop_gpcs_reset(port, 0); + /* mac unreset */ + gop_gmac_reset(port, 0); + break; + + case PHY_INTERFACE_MODE_SFI: + num_of_act_lanes = 2; + mac_num = 0; + /* configure PCS */ + gop_xpcs_mode(port, num_of_act_lanes); + gop_mpcs_mode(port); + /* configure MAC */ + gop_xlg_mac_mode_cfg(port, num_of_act_lanes); + + /* pcs unreset */ + gop_xpcs_reset(port, 0); + + /* mac unreset */ + gop_xlg_mac_reset(port, 0); + break; + + default: + netdev_err(NULL, "%s: Requested port mode (%d) not supported\n", + __func__, port->phy_interface); + return -1; + } + + return 0; +} + +static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable) +{ + u32 val; + + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + if (enable) { + /* Enable port and MIB counters update */ + val |= MVPP22_XLG_PORT_EN; + val &= ~MVPP22_XLG_MIBCNT_DIS; + } else { + /* Disable port */ + val &= ~MVPP22_XLG_PORT_EN; + } + writel(val, port->base + MVPP22_XLG_CTRL0_REG); +} + +static void gop_port_enable(struct mvpp2_port *port, int enable) +{ + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_SGMII: + if (enable) + mvpp2_port_enable(port); + else + mvpp2_port_disable(port); + break; + + case PHY_INTERFACE_MODE_SFI: + gop_xlg_mac_port_enable(port, enable); + + break; + default: + netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__, + port->phy_interface); + return; + } +} + +/* RFU1 functions */ +static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset) +{ + return readl(priv->rfu1_base + offset); +} + +static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data) +{ + writel(data, priv->rfu1_base + offset); +} + +static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type) +{ + u32 val = 0; + + if (gop_id == 2) { + if (phy_type == PHY_INTERFACE_MODE_SGMII) + val |= MV_NETC_GE_MAC2_SGMII; + } + + if (gop_id == 3) { + if (phy_type == PHY_INTERFACE_MODE_SGMII) + val |= MV_NETC_GE_MAC3_SGMII; + else if (phy_type == PHY_INTERFACE_MODE_RGMII || + phy_type == 
PHY_INTERFACE_MODE_RGMII_ID) + val |= MV_NETC_GE_MAC3_RGMII; + } + + return val; +} + +static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val) +{ + u32 reg; + + reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG); + reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id)); + + val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id); + val &= NETC_PORTS_ACTIVE_MASK(gop_id); + + reg |= val; + + gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg); +} + +static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val) +{ + u32 reg; + + reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG); + reg &= ~NETC_GBE_PORT1_MII_MODE_MASK; + + val <<= NETC_GBE_PORT1_MII_MODE_OFFS; + val &= NETC_GBE_PORT1_MII_MODE_MASK; + + reg |= val; + + gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg); +} + +static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val) +{ + u32 reg; + + reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG); + reg &= ~NETC_GOP_SOFT_RESET_MASK; + + val <<= NETC_GOP_SOFT_RESET_OFFS; + val &= NETC_GOP_SOFT_RESET_MASK; + + reg |= val; + + gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg); +} + +static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val) +{ + u32 reg; + + reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); + reg &= ~NETC_CLK_DIV_PHASE_MASK; + + val <<= NETC_CLK_DIV_PHASE_OFFS; + val &= NETC_CLK_DIV_PHASE_MASK; + + reg |= val; + + gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); +} + +static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val) +{ + u32 reg; + + reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG); + reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id)); + + val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id); + val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id); + + reg |= val; + + gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg); +} + +static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id, + u32 val) +{ + u32 reg, mask, offset; + + if (gop_id == 2) { + mask = NETC_GBE_PORT0_SGMII_MODE_MASK; + offset = NETC_GBE_PORT0_SGMII_MODE_OFFS; + } else { + mask = NETC_GBE_PORT1_SGMII_MODE_MASK; + offset = NETC_GBE_PORT1_SGMII_MODE_OFFS; + } + reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG); + reg &= ~mask; + + val <<= offset; + val &= mask; + + reg |= val; + + gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg); +} + +static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val) +{ + u32 reg; + + reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); + reg &= ~NETC_BUS_WIDTH_SELECT_MASK; + + val <<= NETC_BUS_WIDTH_SELECT_OFFS; + val &= NETC_BUS_WIDTH_SELECT_MASK; + + reg |= val; + + gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); +} + +static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val) +{ + u32 reg; + + reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); + reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK; + + val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS; + val &= NETC_GIG_RX_DATA_SAMPLE_MASK; + + reg |= val; + + gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); +} + +static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id, + enum mv_netc_phase phase) +{ + switch (phase) { + case MV_NETC_FIRST_PHASE: + /* Set Bus Width to HB mode = 1 */ + gop_netc_bus_width_select(priv, 1); + /* Select RGMII mode */ + gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII); + break; + + case MV_NETC_SECOND_PHASE: + /* De-assert the relevant port HB reset */ + gop_netc_port_rf_reset(priv, gop_id, 1); + break; + } +} + +static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id, + enum mv_netc_phase phase) +{ + switch 
(phase) { + case MV_NETC_FIRST_PHASE: + /* Set Bus Width to HB mode = 1 */ + gop_netc_bus_width_select(priv, 1); + /* Select SGMII mode */ + if (gop_id >= 1) { + gop_netc_gbe_sgmii_mode_select(priv, gop_id, + MV_NETC_GBE_SGMII); + } + + /* Configure the sample stages */ + gop_netc_sample_stages_timing(priv, 0); + /* Configure the ComPhy Selector */ + /* gop_netc_com_phy_selector_config(netComplex); */ + break; + + case MV_NETC_SECOND_PHASE: + /* De-assert the relevant port HB reset */ + gop_netc_port_rf_reset(priv, gop_id, 1); + break; + } +} + +static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase) +{ + u32 c = priv->netc_config; + + if (c & MV_NETC_GE_MAC2_SGMII) + gop_netc_mac_to_sgmii(priv, 2, phase); + else + gop_netc_mac_to_xgmii(priv, 2, phase); + + if (c & MV_NETC_GE_MAC3_SGMII) { + gop_netc_mac_to_sgmii(priv, 3, phase); + } else { + gop_netc_mac_to_xgmii(priv, 3, phase); + if (c & MV_NETC_GE_MAC3_RGMII) + gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII); + else + gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII); + } + + /* Activate gop ports 0, 2, 3 */ + gop_netc_active_port(priv, 0, 1); + gop_netc_active_port(priv, 2, 1); + gop_netc_active_port(priv, 3, 1); + + if (phase == MV_NETC_SECOND_PHASE) { + /* Enable the GOP internal clock logic */ + gop_netc_gop_clock_logic_set(priv, 1); + /* De-assert GOP unit reset */ + gop_netc_gop_reset(priv, 1); + } + + return 0; +} + +/* Set defaults to the MVPP2 port */ +static void mvpp2_defaults_set(struct mvpp2_port *port) +{ + int tx_port_num, val, queue, ptxq, lrxq; + + if (port->priv->hw_version == MVPP21) { + /* Configure port to loopback if needed */ + if (port->flags & MVPP2_F_LOOPBACK) + mvpp2_port_loopback_set(port); + + /* Update TX FIFO MIN Threshold */ + val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + /* Min. 
TX threshold must be less than minimal packet length */ + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); + writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + } + /* Disable Legacy WRR, Disable EJP, Release from reset */ tx_port_num = mvpp2_egress_port(port); mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, @@ -2791,11 +3899,15 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port, } /* Obtain BM cookie information from descriptor */ -static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc) +static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) { - int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; int cpu = smp_processor_id(); + int pool; + + pool = (mvpp2_rxdesc_status_get(port, rx_desc) & + MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); @@ -2944,9 +4056,11 @@ static int mvpp2_aggr_txq_init(struct udevice *dev, int desc_num, int cpu, struct mvpp2 *priv) { + u32 txq_dma; + /* Allocate memory for TX descriptors */ aggr_txq->descs = buffer_loc.aggr_tx_descs; - aggr_txq->descs_phys = (dma_addr_t)buffer_loc.aggr_tx_descs; + aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs; if (!aggr_txq->descs) return -ENOMEM; @@ -2960,10 +4074,16 @@ static int mvpp2_aggr_txq_init(struct udevice *dev, aggr_txq->next_desc_to_proc = mvpp2_read(priv, MVPP2_AGGR_TXQ_INDEX_REG(cpu)); - /* Set Tx descriptors queue starting address */ - /* indirect access */ - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), - aggr_txq->descs_phys); + /* Set Tx descriptors queue starting address indirect + * access + */ + if (priv->hw_version == MVPP21) + txq_dma = aggr_txq->descs_dma; + else + txq_dma = aggr_txq->descs_dma >> + MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; + + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); return 0; @@ -2974,11 +4094,13 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { + u32 rxq_dma; + rxq->size = port->rx_ring_size; /* Allocate memory for RX descriptors */ rxq->descs = buffer_loc.rx_descs; - rxq->descs_phys = (dma_addr_t)buffer_loc.rx_descs; + rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs; if (!rxq->descs) return -ENOMEM; @@ -2992,7 +4114,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, /* Set Rx descriptors queue starting address - indirect access */ mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys); + if (port->priv->hw_version == MVPP21) + rxq_dma = rxq->descs_dma; + else + rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; + mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); @@ -3017,10 +4143,11 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, for (i = 0; i < rx_received; i++) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - u32 bm = mvpp2_bm_cookie_build(rx_desc); + u32 bm = mvpp2_bm_cookie_build(port, rx_desc); - mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, - rx_desc->buf_cookie); + mvpp2_pool_refill(port, bm, + mvpp2_rxdesc_dma_addr_get(port, rx_desc), + mvpp2_rxdesc_cookie_get(port, rx_desc)); } mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); } @@ -3034,7 +4161,7 @@ static void mvpp2_rxq_deinit(struct mvpp2_port 
*port, rxq->descs = NULL; rxq->last_desc = 0; rxq->next_desc_to_proc = 0; - rxq->descs_phys = 0; + rxq->descs_dma = 0; /* Clear Rx descriptors queue starting address and size; * free descriptor number @@ -3057,7 +4184,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, /* Allocate memory for Tx descriptors */ txq->descs = buffer_loc.tx_descs; - txq->descs_phys = (dma_addr_t)buffer_loc.tx_descs; + txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs; if (!txq->descs) return -ENOMEM; @@ -3069,7 +4196,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, /* Set Tx descriptors queue starting address - indirect access */ mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & MVPP2_TXQ_DESC_SIZE_MASK); mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); @@ -3090,7 +4217,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | - MVPP2_PREF_BUF_THRESH(desc_per_txq/2)); + MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); /* WRR / EJP configuration - indirect access */ tx_port_num = mvpp2_egress_port(port); @@ -3121,7 +4248,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, txq->descs = NULL; txq->last_desc = 0; txq->next_desc_to_proc = 0; - txq->descs_phys = 0; + txq->descs_dma = 0; /* Set minimum bandwidth for disabled TXQs */ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); @@ -3314,20 +4441,21 @@ static void mvpp2_link_event(struct mvpp2_port *port) static void mvpp2_rx_error(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc) { - u32 status = rx_desc->status; + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); switch (status & MVPP2_RXD_ERR_CODE_MASK) { case MVPP2_RXD_ERR_CRC: - netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n", - status, rx_desc->data_size); + netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", + status, sz); break; case MVPP2_RXD_ERR_OVERRUN: - netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n", - status, rx_desc->data_size); + netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", + status, sz); break; case MVPP2_RXD_ERR_RESOURCE: - netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n", - status, rx_desc->data_size); + netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", + status, sz); break; } } @@ -3335,9 +4463,9 @@ static void mvpp2_rx_error(struct mvpp2_port *port, /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ static int mvpp2_rx_refill(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, - u32 bm, u32 phys_addr) + u32 bm, dma_addr_t dma_addr) { - mvpp2_pool_refill(port, bm, phys_addr, phys_addr); + mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr); return 0; } @@ -3347,7 +4475,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port) mvpp2_gmac_max_rx_size_set(port); mvpp2_txp_max_tx_size_set(port); - mvpp2_port_enable(port); + if (port->priv->hw_version == MVPP21) + mvpp2_port_enable(port); + else + gop_port_enable(port, 1); } /* Set hw internals when stopping port */ @@ -3357,7 +4488,11 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) mvpp2_ingress_disable(port); mvpp2_egress_disable(port); - mvpp2_port_disable(port); + + if (port->priv->hw_version 
== MVPP21) + mvpp2_port_disable(port); + else + gop_port_enable(port, 0); } static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port) @@ -3449,9 +4584,14 @@ static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port) static void mvpp2_port_power_up(struct mvpp2_port *port) { - mvpp2_port_mii_set(port); + struct mvpp2 *priv = port->priv; + + /* On PPv2.2 the GoP / interface configuration has already been done */ + if (priv->hw_version == MVPP21) + mvpp2_port_mii_set(port); mvpp2_port_periodic_xon_disable(port); - mvpp2_port_fc_adv_enable(port); + if (priv->hw_version == MVPP21) + mvpp2_port_fc_adv_enable(port); mvpp2_port_reset(port); } @@ -3462,12 +4602,16 @@ static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port) struct mvpp2_txq_pcpu *txq_pcpu; int queue, cpu, err; - if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM) + if (port->first_rxq + rxq_number > + MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; /* Disable port */ mvpp2_egress_disable(port); - mvpp2_port_disable(port); + if (priv->hw_version == MVPP21) + mvpp2_port_disable(port); + else + gop_port_enable(port, 0); port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), GFP_KERNEL); @@ -3523,7 +4667,19 @@ static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port) } /* Configure Rx queue group interrupt for this port */ - mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), CONFIG_MV_ETH_RXQ); + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), + CONFIG_MV_ETH_RXQ); + } else { + u32 val; + + val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = (CONFIG_MV_ETH_RXQ << + MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } /* Create Rx descriptor rings */ for (queue = 0; queue < rxq_number; queue++) { @@ -3554,20 +4710,14 @@ static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port) return 0; } -/* Ports initialization */ -static int mvpp2_port_probe(struct udevice *dev, - struct mvpp2_port *port, - int port_node, - struct mvpp2 *priv, - int *next_first_rxq) +static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port) { + int port_node = dev_of_offset(dev); + const char *phy_mode_str; int phy_node; u32 id; u32 phyaddr; - const char *phy_mode_str; int phy_mode = -1; - int priv_common_regs_num = 2; - int err; phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy"); if (phy_node < 0) { @@ -3589,34 +4739,48 @@ static int mvpp2_port_probe(struct udevice *dev, return -EINVAL; } + /* + * ToDo: + * Not sure if this DT property "phy-speed" will get accepted, so + * this might change later + */ + /* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */ + port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node, + "phy-speed", 1000); + phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0); - port->priv = priv; port->id = id; - port->first_rxq = *next_first_rxq; + if (port->priv->hw_version == MVPP21) + port->first_rxq = port->id * rxq_number; + else + port->first_rxq = port->id * port->priv->max_port_rxqs; port->phy_node = phy_node; port->phy_interface = phy_mode; port->phyaddr = phyaddr; - port->base = (void __iomem *)dev_get_addr_index(dev->parent, - priv_common_regs_num - + id); - if (IS_ERR(port->base)) - return PTR_ERR(port->base); + return 0; +} + +/* Ports initialization */ +static int mvpp2_port_probe(struct udevice *dev, + struct mvpp2_port *port, + int 
port_node, + struct mvpp2 *priv) +{ + int err; port->tx_ring_size = MVPP2_MAX_TXD; port->rx_ring_size = MVPP2_MAX_RXD; err = mvpp2_port_init(dev, port); if (err < 0) { - dev_err(&pdev->dev, "failed to init port %d\n", id); + dev_err(&pdev->dev, "failed to init port %d\n", port->id); return err; } mvpp2_port_power_up(port); - /* Increment the first Rx queue number to be used by the next port */ - *next_first_rxq += CONFIG_MV_ETH_RXQ; - priv->port_list[id] = port; + priv->port_list[port->id] = port; return 0; } @@ -3659,10 +4823,35 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv) int port; for (port = 0; port < MVPP2_MAX_PORTS; port++) { - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_DATA_SIZE); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_ATTR_SIZE); + if (priv->hw_version == MVPP22) { + if (port == 0) { + mvpp2_write(priv, + MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE); + mvpp2_write(priv, + MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE); + } else if (port == 1) { + mvpp2_write(priv, + MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE); + mvpp2_write(priv, + MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE); + } else { + mvpp2_write(priv, + MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE); + mvpp2_write(priv, + MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE); + } + } else { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP21_RX_FIFO_PORT_DATA_SIZE); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP21_RX_FIFO_PORT_ATTR_SIZE); + } } mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, @@ -3670,6 +4859,78 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv) mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); } +/* Initialize Tx FIFO's */ +static void mvpp2_tx_fifo_init(struct mvpp2 *priv) +{ + int port, val; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + /* Port 0 supports 10KB TX FIFO */ + if (port == 0) { + val = MVPP2_TX_FIFO_DATA_SIZE_10KB & + MVPP22_TX_FIFO_SIZE_MASK; + } else { + val = MVPP2_TX_FIFO_DATA_SIZE_3KB & + MVPP22_TX_FIFO_SIZE_MASK; + } + mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val); + } +} + +static void mvpp2_axi_init(struct mvpp2 *priv) +{ + u32 val, rdval, wrval; + + mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); + + /* AXI Bridge Configuration */ + + rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE + << MVPP22_AXI_ATTR_CACHE_OFFS; + rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_ATTR_DOMAIN_OFFS; + + wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE + << MVPP22_AXI_ATTR_CACHE_OFFS; + wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_ATTR_DOMAIN_OFFS; + + /* BM */ + mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); + mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); + + /* Descriptors */ + mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); + mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); + + /* Buffer Data */ + mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); + mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); + + val = MVPP22_AXI_CODE_CACHE_NON_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); + mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, 
val); + + val = MVPP22_AXI_CODE_CACHE_RD_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + + mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); + + val = MVPP22_AXI_CODE_CACHE_WR_CACHE + << MVPP22_AXI_CODE_CACHE_OFFS; + val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM + << MVPP22_AXI_CODE_DOMAIN_OFFS; + + mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); +} + /* Initialize network controller common part HW */ static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) { @@ -3678,7 +4939,8 @@ static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) u32 val; /* Checks for hardware constraints (U-Boot uses only one rxq) */ - if ((rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) { + if ((rxq_number > priv->max_port_rxqs) || + (txq_number > MVPP2_MAX_TXQ)) { dev_err(&pdev->dev, "invalid queue size parameter\n"); return -EINVAL; } @@ -3688,10 +4950,20 @@ static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) if (dram_target_info) mvpp2_conf_mbus_windows(dram_target_info, priv); - /* Disable HW PHY polling */ - val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); - val |= MVPP2_PHY_AN_STOP_SMI0_MASK; - writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + if (priv->hw_version == MVPP22) + mvpp2_axi_init(priv); + + if (priv->hw_version == MVPP21) { + /* Disable HW PHY polling */ + val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + val |= MVPP2_PHY_AN_STOP_SMI0_MASK; + writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); + } else { + /* Enable HW PHY polling */ + val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); + val |= MVPP22_SMI_POLLING_EN; + writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); + } /* Allocate and initialize aggregated TXQs */ priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(), @@ -3712,13 +4984,32 @@ static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) /* Rx Fifo Init */ mvpp2_rx_fifo_init(priv); + /* Tx Fifo Init */ + if (priv->hw_version == MVPP22) + mvpp2_tx_fifo_init(priv); + /* Reset Rx queue group interrupt configuration */ - for (i = 0; i < MVPP2_MAX_PORTS; i++) - mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), - CONFIG_MV_ETH_RXQ); + for (i = 0; i < MVPP2_MAX_PORTS; i++) { + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i), + CONFIG_MV_ETH_RXQ); + continue; + } else { + u32 val; + + val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = (CONFIG_MV_ETH_RXQ << + MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); + mvpp2_write(priv, + MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } + } - writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, - priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); + if (priv->hw_version == MVPP21) + writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, + priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); /* Allow cache snoop when transmiting packets */ mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); @@ -3749,7 +5040,7 @@ static int smi_wait_ready(struct mvpp2 *priv) /* wait till the SMI is not busy */ do { /* read smi register */ - smi_reg = readl(priv->lms_base + MVPP2_SMI); + smi_reg = readl(priv->mdio_base); if (timeout-- == 0) { printf("Error: SMI busy timeout\n"); return -EFAULT; @@ -3791,14 +5082,14 @@ static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) | MVPP2_SMI_OPCODE_READ; /* write the smi register */ - writel(smi_reg, priv->lms_base + MVPP2_SMI); + writel(smi_reg, priv->mdio_base); /* wait till read value is ready */ timeout = 
MVPP2_SMI_TIMEOUT; do { /* read smi register */ - smi_reg = readl(priv->lms_base + MVPP2_SMI); + smi_reg = readl(priv->mdio_base); if (timeout-- == 0) { printf("Err: SMI read ready timeout\n"); return -EFAULT; @@ -3809,7 +5100,7 @@ static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++) ; - return readl(priv->lms_base + MVPP2_SMI) & MVPP2_SMI_DATA_MASK; + return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK; } /* @@ -3846,7 +5137,7 @@ static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, smi_reg &= ~MVPP2_SMI_OPCODE_READ; /* write the smi register */ - writel(smi_reg, priv->lms_base + MVPP2_SMI); + writel(smi_reg, priv->mdio_base); return 0; } @@ -3856,7 +5147,7 @@ static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) struct mvpp2_port *port = dev_get_priv(dev); struct mvpp2_rx_desc *rx_desc; struct mvpp2_bm_pool *bm_pool; - dma_addr_t phys_addr; + dma_addr_t dma_addr; u32 bm, rx_status; int pool, rx_bytes, err; int rx_received; @@ -3885,18 +5176,15 @@ static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) return 0; rx_desc = mvpp2_rxq_next_desc_get(rxq); - rx_status = rx_desc->status; - rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; - phys_addr = rx_desc->buf_phys_addr; + rx_status = mvpp2_rxdesc_status_get(port, rx_desc); + rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); + rx_bytes -= MVPP2_MH_SIZE; + dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); - bm = mvpp2_bm_cookie_build(rx_desc); + bm = mvpp2_bm_cookie_build(port, rx_desc); pool = mvpp2_bm_cookie_pool_get(bm); bm_pool = &port->priv->bm_pools[pool]; - /* Check if buffer header is used */ - if (rx_status & MVPP2_RXD_BUF_HDR) - return 0; - /* In case of an error, release the requested buffer pointer * to the Buffer Manager. 
This request process is controlled * by the hardware, and the information about the buffer is @@ -3905,12 +5193,11 @@ static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) if (rx_status & MVPP2_RXD_ERR_SUMMARY) { mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ - mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, - rx_desc->buf_cookie); + mvpp2_pool_refill(port, bm, dma_addr, dma_addr); return 0; } - err = mvpp2_rx_refill(port, bm_pool, bm, phys_addr); + err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); return 0; @@ -3921,7 +5208,7 @@ static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) mvpp2_rxq_status_update(port, rxq->id, 1, 1); /* give packet to stack - skip on first n bytes */ - data = (u8 *)phys_addr + 2 + 32; + data = (u8 *)dma_addr + 2 + 32; if (rx_bytes <= 0) return 0; @@ -3963,16 +5250,20 @@ static int mvpp2_send(struct udevice *dev, void *packet, int length) /* Get a descriptor for the first part of the packet */ tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - tx_desc->phys_txq = txq->id; - tx_desc->data_size = length; - tx_desc->packet_offset = (u32)packet & MVPP2_TX_DESC_ALIGN; - tx_desc->buf_phys_addr = (u32)packet & ~MVPP2_TX_DESC_ALIGN; + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, length); + mvpp2_txdesc_offset_set(port, tx_desc, + (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, + (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN); /* First and Last descriptor */ - tx_desc->command = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE - | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; + mvpp2_txdesc_cmd_set(port, tx_desc, + MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE + | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC); /* Flush tx data */ - flush_dcache_range((u32)packet, (u32)packet + length); + flush_dcache_range((unsigned long)packet, + (unsigned long)packet + ALIGN(length, PKTALIGN)); /* Enable transmit */ mb(); @@ -4034,43 +5325,14 @@ static void mvpp2_stop(struct udevice *dev) mvpp2_cleanup_txqs(port); } -static int mvpp2_probe(struct udevice *dev) +static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port) { - struct mvpp2_port *port = dev_get_priv(dev); - struct mvpp2 *priv = dev_get_priv(dev->parent); - int err; - - /* Initialize network controller */ - err = mvpp2_init(dev, priv); - if (err < 0) { - dev_err(&pdev->dev, "failed to initialize controller\n"); - return err; - } + writel(port->phyaddr, port->priv->iface_base + + MVPP22_SMI_PHY_ADDR_REG(port->gop_id)); - return mvpp2_port_probe(dev, port, dev_of_offset(dev), priv, - &buffer_loc.first_rxq); + return 0; } -static const struct eth_ops mvpp2_ops = { - .start = mvpp2_start, - .send = mvpp2_send, - .recv = mvpp2_recv, - .stop = mvpp2_stop, -}; - -static struct driver mvpp2_driver = { - .name = "mvpp2", - .id = UCLASS_ETH, - .probe = mvpp2_probe, - .ops = &mvpp2_ops, - .priv_auto_alloc_size = sizeof(struct mvpp2_port), - .platdata_auto_alloc_size = sizeof(struct eth_pdata), -}; - -/* - * Use a MISC device to bind the n instances (child nodes) of the - * network base controller in UCLASS_ETH. 
- */ static int mvpp2_base_probe(struct udevice *dev) { struct mvpp2 *priv = dev_get_priv(dev); @@ -4079,6 +5341,9 @@ static int mvpp2_base_probe(struct udevice *dev) u32 size = 0; int i; + /* Save hw-version */ + priv->hw_version = dev_get_driver_data(dev); + /* * U-Boot special buffer handling: * @@ -4089,35 +5354,66 @@ static int mvpp2_base_probe(struct udevice *dev) /* Align buffer area for descs and rx_buffers to 1MiB */ bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); - mmu_set_region_dcache_behaviour((u32)bd_space, BD_SPACE, DCACHE_OFF); + mmu_set_region_dcache_behaviour((unsigned long)bd_space, + BD_SPACE, DCACHE_OFF); buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space; size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE; - buffer_loc.tx_descs = (struct mvpp2_tx_desc *)((u32)bd_space + size); + buffer_loc.tx_descs = + (struct mvpp2_tx_desc *)((unsigned long)bd_space + size); size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE; - buffer_loc.rx_descs = (struct mvpp2_rx_desc *)((u32)bd_space + size); + buffer_loc.rx_descs = + (struct mvpp2_rx_desc *)((unsigned long)bd_space + size); size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE; for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { - buffer_loc.bm_pool[i] = (u32 *)((u32)bd_space + size); - size += MVPP2_BM_POOL_SIZE_MAX * sizeof(u32); + buffer_loc.bm_pool[i] = + (unsigned long *)((unsigned long)bd_space + size); + if (priv->hw_version == MVPP21) + size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32); + else + size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64); } for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) { - buffer_loc.rx_buffer[i] = (u32 *)((u32)bd_space + size); + buffer_loc.rx_buffer[i] = + (unsigned long *)((unsigned long)bd_space + size); size += RX_BUFFER_SIZE; } + /* Clear the complete area so that all descriptors are cleared */ + memset(bd_space, 0, size); + /* Save base addresses for later use */ priv->base = (void *)dev_get_addr_index(dev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); - priv->lms_base = (void *)dev_get_addr_index(dev, 1); - if (IS_ERR(priv->lms_base)) - return PTR_ERR(priv->lms_base); + if (priv->hw_version == MVPP21) { + priv->lms_base = (void *)dev_get_addr_index(dev, 1); + if (IS_ERR(priv->lms_base)) + return PTR_ERR(priv->lms_base); + + priv->mdio_base = priv->lms_base + MVPP21_SMI; + } else { + priv->iface_base = (void *)dev_get_addr_index(dev, 1); + if (IS_ERR(priv->iface_base)) + return PTR_ERR(priv->iface_base); + + priv->mdio_base = priv->iface_base + MVPP22_SMI; + + /* Store common base addresses for all ports */ + priv->mpcs_base = priv->iface_base + MVPP22_MPCS; + priv->xpcs_base = priv->iface_base + MVPP22_XPCS; + priv->rfu1_base = priv->iface_base + MVPP22_RFU1; + } + + if (priv->hw_version == MVPP21) + priv->max_port_rxqs = 8; + else + priv->max_port_rxqs = 32; /* Finally create and register the MDIO bus driver */ bus = mdio_alloc(); @@ -4135,6 +5431,96 @@ static int mvpp2_base_probe(struct udevice *dev) return mdio_register(bus); } +static int mvpp2_probe(struct udevice *dev) +{ + struct mvpp2_port *port = dev_get_priv(dev); + struct mvpp2 *priv = dev_get_priv(dev->parent); + int err; + + /* Only call the probe function for the parent once */ + if (!priv->probe_done) { + err = mvpp2_base_probe(dev->parent); + priv->probe_done = 1; + } + + port->priv = dev_get_priv(dev->parent); + + err = phy_info_parse(dev, port); + if (err) + return err; + + /* + * We need the port specific io base addresses at this stage, since + * gop_port_init() accesses these registers + */ + if 
+		int priv_common_regs_num = 2;
+
+		port->base = (void __iomem *)dev_get_addr_index(
+			dev->parent, priv_common_regs_num + port->id);
+		if (IS_ERR(port->base))
+			return PTR_ERR(port->base);
+	} else {
+		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
+					      "gop-port-id", -1);
+		if (port->gop_id == -1) {
+			dev_err(dev, "missing gop-port-id value\n");
+			return -EINVAL;
+		}
+
+		port->base = priv->iface_base + MVPP22_PORT_BASE +
+			port->gop_id * MVPP22_PORT_OFFSET;
+
+		/* Set phy address of the port */
+		mvpp22_smi_phy_addr_cfg(port);
+
+		/* GoP Init */
+		gop_port_init(port);
+	}
+
+	/* Initialize network controller */
+	err = mvpp2_init(dev, priv);
+	if (err < 0) {
+		dev_err(dev, "failed to initialize controller\n");
+		return err;
+	}
+
+	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
+	if (err)
+		return err;
+
+	if (priv->hw_version == MVPP22) {
+		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
+							   port->phy_interface);
+
+		/* Netcomplex configurations for all ports */
+		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
+		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
+	}
+
+	return 0;
+}
+
+static const struct eth_ops mvpp2_ops = {
+	.start = mvpp2_start,
+	.send = mvpp2_send,
+	.recv = mvpp2_recv,
+	.stop = mvpp2_stop,
+};
+
+static struct driver mvpp2_driver = {
+	.name = "mvpp2",
+	.id = UCLASS_ETH,
+	.probe = mvpp2_probe,
+	.ops = &mvpp2_ops,
+	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
+	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
+};
+
+/*
+ * Use a MISC device to bind the n instances (child nodes) of the
+ * network base controller in UCLASS_ETH.
+ */
 static int mvpp2_base_bind(struct udevice *parent)
 {
 	const void *blob = gd->fdt_blob;
@@ -4145,6 +5531,7 @@ static int mvpp2_base_bind(struct udevice *parent)
 	char *name;
 	int subnode;
 	u32 id;
+	int base_id_add;
 
 	/* Lookup eth driver */
 	drv = lists_uclass_lookup(UCLASS_ETH);
@@ -4153,7 +5540,12 @@
 		return -ENOENT;
 	}
 
+	base_id_add = base_id;
+
 	fdt_for_each_subnode(subnode, blob, node) {
+		/* Increment base_id for all subnodes, also the disabled ones */
+		base_id++;
+
 		/* Skip disabled ports */
 		if (!fdtdec_get_is_enabled(blob, subnode))
 			continue;
@@ -4163,6 +5555,7 @@
 			return -ENOMEM;
 
 		id = fdtdec_get_int(blob, subnode, "port-id", -1);
+		id += base_id_add;
 		name = calloc(1, 16);
 		sprintf(name, "mvpp2-%d", id);
@@ -4176,7 +5569,14 @@
 }
 
 static const struct udevice_id mvpp2_ids[] = {
-	{ .compatible = "marvell,armada-375-pp2" },
+	{
+		.compatible = "marvell,armada-375-pp2",
+		.data = MVPP21,
+	},
+	{
+		.compatible = "marvell,armada-7k-pp22",
+		.data = MVPP22,
+	},
 	{ }
 };
 
@@ -4185,6 +5585,5 @@ U_BOOT_DRIVER(mvpp2_base) = {
 	.id = UCLASS_MISC,
 	.of_match = mvpp2_ids,
 	.bind = mvpp2_base_bind,
-	.probe = mvpp2_base_probe,
 	.priv_auto_alloc_size = sizeof(struct mvpp2),
 };
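The bind change above gives every port a globally unique name even when more than one PP2 controller is bound: each controller's ports are offset by the number of sub-nodes already seen. A small stand-alone C sketch of that naming scheme, not driver code; the port-id arrays and the main() harness are purely illustrative:

#include <stdio.h>

/* Mimics the "base_id" counter in mvpp2_base_bind(): it keeps growing
 * across controllers, counting every sub-node (even disabled ones). */
static int base_id;

static void bind_controller(const int *port_ids, int nports)
{
	int base_id_add = base_id;	/* offset for this controller */
	int i;

	for (i = 0; i < nports; i++) {
		int id = port_ids[i] + base_id_add;

		base_id++;		/* advance for every sub-node */
		printf("mvpp2-%d\n", id);
	}
}

int main(void)
{
	const int cp0_ports[] = { 0, 1, 2 };	/* hypothetical DT port-ids */
	const int cp1_ports[] = { 0, 1, 2 };

	bind_controller(cp0_ports, 3);	/* -> mvpp2-0, mvpp2-1, mvpp2-2 */
	bind_controller(cp1_ports, 3);	/* -> mvpp2-3, mvpp2-4, mvpp2-5 */
	return 0;
}

Without the base_id offset, the second controller's ports would collide with the first one's "mvpp2-0" to "mvpp2-2" device names.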
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6ee8bc3134..aca3990aeb 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -103,4 +103,14 @@ config PHY_VITESSE
 config PHY_XILINX
 	bool "Xilinx Ethernet PHYs support"
 
+config PHY_FIXED
+	bool "Fixed-Link PHY"
+	depends on DM_ETH
+	help
+	  Fixed PHY is used for a 'fixed-link' connection to another MAC,
+	  e.g. a direct MII or RGMII connection.
+	  There is no auto-negotiation; the link is always up with a
+	  fixed speed and a fixed duplex setting.
+	  More information: doc/device-tree-bindings/net/fixed-link.txt
+
 endif #PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index d37297122a..88c00a5cd3 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_PHY_TI) += ti.o
 obj-$(CONFIG_PHY_XILINX) += xilinx_phy.o
 obj-$(CONFIG_PHY_VITESSE) += vitesse.o
 obj-$(CONFIG_PHY_MSCC) += mscc.o
+obj-$(CONFIG_PHY_FIXED) += fixed.o
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
new file mode 100644
index 0000000000..df8235645e
--- /dev/null
+++ b/drivers/net/phy/fixed.c
@@ -0,0 +1,82 @@
+/*
+ * Fixed-Link phy
+ *
+ * Copyright 2017 Bernecker & Rainer Industrieelektronik GmbH
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include <config.h>
+#include <common.h>
+#include <phy.h>
+#include <dm.h>
+#include <fdt_support.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int fixedphy_probe(struct phy_device *phydev)
+{
+	struct fixed_link *priv;
+	int ofnode = phydev->addr;
+	u32 val;
+
+	/* check for mandatory properties within fixed-link node */
+	val = fdt_getprop_u32_default_node(gd->fdt_blob,
+					   ofnode, 0, "speed", 0);
+	if (val != SPEED_10 && val != SPEED_100 && val != SPEED_1000) {
+		printf("ERROR: no/invalid speed given in fixed-link node!\n");
+		return -EINVAL;
+	}
+
+	priv = malloc(sizeof(*priv));
+	if (!priv)
+		return -ENOMEM;
+	memset(priv, 0, sizeof(*priv));
+
+	phydev->priv = priv;
+	phydev->addr = 0;
+
+	priv->link_speed = val;
+	priv->duplex = fdtdec_get_bool(gd->fdt_blob, ofnode, "full-duplex");
+	priv->pause = fdtdec_get_bool(gd->fdt_blob, ofnode, "pause");
+	priv->asym_pause = fdtdec_get_bool(gd->fdt_blob, ofnode, "asym-pause");
+
+	/* fixed-link phy must not be reset by core phy code */
+	phydev->flags |= PHY_FLAG_BROKEN_RESET;
+
+	return 0;
+}
+
+int fixedphy_startup(struct phy_device *phydev)
+{
+	struct fixed_link *priv = phydev->priv;
+
+	phydev->asym_pause = priv->asym_pause;
+	phydev->pause = priv->pause;
+	phydev->duplex = priv->duplex;
+	phydev->speed = priv->link_speed;
+	phydev->link = 1;
+
+	return 0;
+}
+
+int fixedphy_shutdown(struct phy_device *phydev)
+{
+	return 0;
+}
+
+static struct phy_driver fixedphy_driver = {
+	.uid = PHY_FIXED_ID,
+	.mask = 0xffffffff,
+	.name = "Fixed PHY",
+	.features = PHY_GBIT_FEATURES | SUPPORTED_MII,
+	.probe = fixedphy_probe,
+	.startup = fixedphy_startup,
+	.shutdown = fixedphy_shutdown,
+};
+
+int phy_fixed_init(void)
+{
+	phy_register(&fixedphy_driver);
+	return 0;
+}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8db65749b1..8bacd991ad 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -515,7 +515,9 @@ int phy_init(void)
 #ifdef CONFIG_PHY_MSCC
 	phy_mscc_init();
 #endif
-
+#ifdef CONFIG_PHY_FIXED
+	phy_fixed_init();
+#endif
 	return 0;
 }
 
@@ -854,9 +856,24 @@ struct phy_device *phy_connect(struct mii_dev *bus, int addr,
 		struct eth_device *dev, phy_interface_t interface)
 #endif
 {
-	struct phy_device *phydev;
+	struct phy_device *phydev = NULL;
+#ifdef CONFIG_PHY_FIXED
+	int sn;
+	const char *name;
+	sn = fdt_first_subnode(gd->fdt_blob, dev->of_offset);
+	while (sn > 0) {
+		name = fdt_get_name(gd->fdt_blob, sn, NULL);
+		if (name != NULL && strcmp(name, "fixed-link") == 0) {
+			phydev = phy_device_create(bus,
+						   sn, PHY_FIXED_ID, interface);
+			break;
+		}
+		sn = fdt_next_subnode(gd->fdt_blob, sn);
+	}
+#endif
+	if (phydev == NULL)
+		phydev = phy_find_by_mask(bus, 1 << addr, interface);
 
-	phydev = phy_find_by_mask(bus, 1 << addr, interface);
 	if (phydev)
 		phy_connect_dev(phydev, dev);
 	else
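With the phy_connect() change above, a MAC driver needs no special handling for a fixed link: when its device-tree node carries a fixed-link sub-node, phy_connect() hands back a PHY_FIXED_ID device whose speed and duplex come straight from that node. A hedged usage sketch follows; demo_mac_priv, demo_mac_start and their fields are placeholders, only the phylib calls (phy_connect, phy_config, phy_startup) are existing API:

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <phy.h>

/* Hypothetical private data; only bus/phyaddr/iface matter for the sketch */
struct demo_mac_priv {
	struct mii_dev *bus;
	int phyaddr;
	phy_interface_t iface;
};

static int demo_mac_start(struct udevice *dev)
{
	struct demo_mac_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;
	int ret;

	/*
	 * If dev's device-tree node has a "fixed-link" sub-node, the new
	 * phy_connect() path returns a fixed PHY instead of probing the
	 * MDIO bus at priv->phyaddr.
	 */
	phydev = phy_connect(priv->bus, priv->phyaddr, dev, priv->iface);
	if (!phydev)
		return -ENODEV;

	ret = phy_config(phydev);
	if (ret)
		return ret;

	/* For the fixed PHY this just reports link up at the DT speed/duplex */
	ret = phy_startup(phydev);
	if (ret)
		return ret;

	printf("link up, %d Mbps, %s duplex\n", phydev->speed,
	       phydev->duplex ? "full" : "half");

	return 0;
}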
diff --git a/drivers/net/smc91111.h b/drivers/net/smc91111.h
index 5197f36039..a31f6f6db0 100644
--- a/drivers/net/smc91111.h
+++ b/drivers/net/smc91111.h
@@ -253,8 +253,6 @@ struct smc91111_priv{
 
 #ifdef CONFIG_ADNPESC1
 #define	SMC_inw(a,r)	(*((volatile word *)((a)->iobase+((r)<<1))))
-#elif CONFIG_BLACKFIN
-#define	SMC_inw(a,r)	({ word __v = (*((volatile word *)((a)->iobase+(r)))); SSYNC(); __v;})
 #elif CONFIG_ARM64
 #define SMC_inw(a, r)	(*((volatile word*)((a)->iobase+((dword)(r)))))
 #else
@@ -264,11 +262,6 @@ struct smc91111_priv{
 
 #ifdef CONFIG_ADNPESC1
 #define	SMC_outw(a,d,r)	(*((volatile word *)((a)->iobase+((r)<<1))) = d)
-#elif CONFIG_BLACKFIN
-#define SMC_outw(a, d, r) \
-	({ (*((volatile word*)((a)->iobase+((r)))) = d); \
-	SSYNC(); \
-	})
 #elif CONFIG_ARM64
 #define	SMC_outw(a, d, r) \
 	(*((volatile word*)((a)->iobase+((dword)(r)))) = d)
diff --git a/drivers/net/sun8i_emac.c b/drivers/net/sun8i_emac.c
index b87210bad7..a3dbe2823b 100644
--- a/drivers/net/sun8i_emac.c
+++ b/drivers/net/sun8i_emac.c
@@ -21,6 +21,9 @@
 #include <malloc.h>
 #include <miiphy.h>
 #include <net.h>
+#ifdef CONFIG_DM_GPIO
+#include <asm-generic/gpio.h>
+#endif
 
 #define MDIO_CMD_MII_BUSY	BIT(0)
 #define MDIO_CMD_MII_WRITE	BIT(1)
@@ -62,7 +65,7 @@
 
 #define AHB_GATE_OFFSET_EPHY	0
 
-#if defined(CONFIG_MACH_SUN8I_H3)
+#if defined(CONFIG_MACH_SUNXI_H3_H5)
 #define SUN8I_GPD8_GMAC		2
 #else
 #define SUN8I_GPD8_GMAC		4
@@ -128,11 +131,22 @@ struct emac_eth_dev {
 	phys_addr_t sysctl_reg;
 	struct phy_device *phydev;
 	struct mii_dev *bus;
+#ifdef CONFIG_DM_GPIO
+	struct gpio_desc reset_gpio;
+#endif
+};
+
+
+struct sun8i_eth_pdata {
+	struct eth_pdata eth_pdata;
+	u32 reset_delays[3];
 };
 
+
 static int sun8i_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
 {
-	struct emac_eth_dev *priv = bus->priv;
+	struct udevice *dev = bus->priv;
+	struct emac_eth_dev *priv = dev_get_priv(dev);
 	ulong start;
 	u32 miiaddr = 0;
 	int timeout = CONFIG_MDIO_TIMEOUT;
@@ -164,7 +178,8 @@ static int sun8i_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
 static int sun8i_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
 			    u16 val)
 {
-	struct emac_eth_dev *priv = bus->priv;
+	struct udevice *dev = bus->priv;
+	struct emac_eth_dev *priv = dev_get_priv(dev);
 	ulong start;
 	u32 miiaddr = 0;
 	int ret = -1, timeout = CONFIG_MDIO_TIMEOUT;
@@ -604,7 +619,41 @@ static void sun8i_emac_board_setup(struct emac_eth_dev *priv)
 	setbits_le32(&ccm->ahb_reset0_cfg, BIT(AHB_RESET_OFFSET_GMAC));
 }
 
-static int sun8i_mdio_init(const char *name, struct emac_eth_dev *priv)
+#if defined(CONFIG_DM_GPIO)
+static int sun8i_mdio_reset(struct mii_dev *bus)
+{
+	struct udevice *dev = bus->priv;
+	struct emac_eth_dev *priv = dev_get_priv(dev);
+	struct sun8i_eth_pdata *pdata = dev_get_platdata(dev);
+	int ret;
+
+	if (!dm_gpio_is_valid(&priv->reset_gpio))
+		return 0;
+
+	/* reset the phy */
+	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
+	if (ret)
+		return ret;
+
+	udelay(pdata->reset_delays[0]);
+
+	ret = dm_gpio_set_value(&priv->reset_gpio, 1);
+	if (ret)
+		return ret;
+
+	udelay(pdata->reset_delays[1]);
+
+	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
+	if (ret)
+		return ret;
+
+	udelay(pdata->reset_delays[2]);
+
+	return 0;
+}
+#endif
+
+static int sun8i_mdio_init(const char *name, struct udevice *priv)
 {
 	struct mii_dev *bus = mdio_alloc();
 
@@ -617,6 +666,9 @@ static int sun8i_mdio_init(const char *name, struct emac_eth_dev *priv)
 	bus->write = sun8i_mdio_write;
 	snprintf(bus->name, sizeof(bus->name), name);
 	bus->priv = (void *)priv;
+#if defined(CONFIG_DM_GPIO)
+	bus->reset = sun8i_mdio_reset;
+#endif
 
 	return mdio_register(bus);
 }
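The bus->reset hook registered above performs a three-phase PHY reset driven entirely by device-tree data: release the line, assert it for the reset pulse, then release it again, with the three delays taken from "snps,reset-delays-us". A stand-alone sketch of that sequence with hypothetical delay values (the real driver reads them from platdata, and GPIOD_ACTIVE_LOW handling makes logical 1 mean "in reset"):

#include <common.h>
#include <asm-generic/gpio.h>

static int demo_phy_reset(struct gpio_desc *reset_gpio)
{
	/* would normally come from "snps,reset-delays-us"; values are made up */
	const u32 delays_us[3] = { 1000, 10000, 30000 };
	int ret;

	if (!dm_gpio_is_valid(reset_gpio))
		return 0;	/* no reset line wired up: nothing to do */

	ret = dm_gpio_set_value(reset_gpio, 0);	/* make sure reset is released */
	if (ret)
		return ret;
	udelay(delays_us[0]);

	ret = dm_gpio_set_value(reset_gpio, 1);	/* put the PHY into reset */
	if (ret)
		return ret;
	udelay(delays_us[1]);	/* minimum reset pulse width */

	ret = dm_gpio_set_value(reset_gpio, 0);	/* release reset */
	if (ret)
		return ret;
	udelay(delays_us[2]);	/* let the PHY come out of reset */

	return 0;
}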
@@ -696,7 +748,7 @@ static int sun8i_emac_eth_probe(struct udevice *dev)
 
 	sun8i_emac_board_setup(priv);
 	sun8i_emac_set_syscon(priv);
 
-	sun8i_mdio_init(dev->name, priv);
+	sun8i_mdio_init(dev->name, dev);
 	priv->bus = miiphy_get_dev_by_name(dev->name);
 
 	return sun8i_phy_init(priv, dev);
@@ -713,11 +765,16 @@ static const struct eth_ops sun8i_emac_eth_ops = {
 
 static int sun8i_emac_eth_ofdata_to_platdata(struct udevice *dev)
 {
-	struct eth_pdata *pdata = dev_get_platdata(dev);
+	struct sun8i_eth_pdata *sun8i_pdata = dev_get_platdata(dev);
+	struct eth_pdata *pdata = &sun8i_pdata->eth_pdata;
 	struct emac_eth_dev *priv = dev_get_priv(dev);
 	const char *phy_mode;
 	int node = dev_of_offset(dev);
 	int offset = 0;
+#ifdef CONFIG_DM_GPIO
+	int reset_flags = GPIOD_IS_OUT;
+	int ret = 0;
+#endif
 
 	pdata->iobase = dev_get_addr_name(dev, "emac");
 	priv->sysctl_reg = dev_get_addr_name(dev, "syscon");
@@ -762,6 +819,23 @@ static int sun8i_emac_eth_ofdata_to_platdata(struct udevice *dev)
 	if (!priv->use_internal_phy)
 		parse_phy_pins(dev);
 
+#ifdef CONFIG_DM_GPIO
+	if (fdtdec_get_bool(gd->fdt_blob, dev->of_offset,
+			    "snps,reset-active-low"))
+		reset_flags |= GPIOD_ACTIVE_LOW;
+
+	ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
+				   &priv->reset_gpio, reset_flags);
+
+	if (ret == 0) {
+		ret = fdtdec_get_int_array(gd->fdt_blob, dev->of_offset,
+					   "snps,reset-delays-us",
+					   sun8i_pdata->reset_delays, 3);
+	} else if (ret == -ENOENT) {
+		ret = 0;
+	}
+#endif
+
 	return 0;
 }
 
@@ -782,6 +856,6 @@ U_BOOT_DRIVER(eth_sun8i_emac) = {
 	.probe = sun8i_emac_eth_probe,
 	.ops = &sun8i_emac_eth_ops,
 	.priv_auto_alloc_size = sizeof(struct emac_eth_dev),
-	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
+	.platdata_auto_alloc_size = sizeof(struct sun8i_eth_pdata),
 	.flags = DM_FLAG_ALLOC_PRIV_DMA,
 };
diff --git a/drivers/net/sunxi_emac.c b/drivers/net/sunxi_emac.c
index 11cd0ea068..99339db4bf 100644
--- a/drivers/net/sunxi_emac.c
+++ b/drivers/net/sunxi_emac.c
@@ -327,6 +327,20 @@ static void emac_reset(struct emac_eth_dev *priv)
 	udelay(200);
 }
 
+static int _sunxi_write_hwaddr(struct emac_eth_dev *priv, u8 *enetaddr)
+{
+	struct emac_regs *regs = priv->regs;
+	u32 enetaddr_lo, enetaddr_hi;
+
+	enetaddr_hi = enetaddr[2] | (enetaddr[1] << 8) | (enetaddr[0] << 16);
+	enetaddr_lo = enetaddr[5] | (enetaddr[4] << 8) | (enetaddr[3] << 16);
+
+	writel(enetaddr_hi, &regs->mac_a1);
+	writel(enetaddr_lo, &regs->mac_a0);
+
+	return 0;
+}
+
 static int _sunxi_emac_eth_init(struct emac_eth_dev *priv, u8 *enetaddr)
 {
 	struct emac_regs *regs = priv->regs;
@@ -350,10 +364,7 @@ static int _sunxi_emac_eth_init(struct emac_eth_dev *priv, u8 *enetaddr)
 	/* Set up EMAC */
 	emac_setup(priv);
 
-	writel(enetaddr[0] << 16 | enetaddr[1] << 8 | enetaddr[2],
-	       &regs->mac_a1);
-	writel(enetaddr[3] << 16 | enetaddr[4] << 8 | enetaddr[5],
-	       &regs->mac_a0);
+	_sunxi_write_hwaddr(priv, enetaddr);
 
 	mdelay(1);
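To make the register packing in _sunxi_write_hwaddr() concrete: three address bytes go into each 24-bit register, with the high-order bytes of the MAC ending up in mac_a1 exactly as in the writes being replaced. A tiny host-side illustration with an arbitrary sample address:

#include <stdio.h>

int main(void)
{
	const unsigned char enetaddr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned int mac_a1, mac_a0;

	/* Same packing as _sunxi_write_hwaddr(): three address bytes per register */
	mac_a1 = enetaddr[0] << 16 | enetaddr[1] << 8 | enetaddr[2];
	mac_a0 = enetaddr[3] << 16 | enetaddr[4] << 8 | enetaddr[5];

	printf("mac_a1 = 0x%06x\n", mac_a1);	/* prints 0x001122 */
	printf("mac_a0 = 0x%06x\n", mac_a0);	/* prints 0x334455 */

	return 0;
}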