Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig              |   9
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile             |   1
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c       | 689
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.h       |  96
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c       | 186
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.h       |  14
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c         |  89
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h         |  37
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c              | 114
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h              |  59
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c   |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c          | 499
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h          |  53
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c  |  11
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  |   3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h      | 344
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c      |   8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c      |   9
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h     |  59
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c       |   8
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                |  36
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h                |   1
-rw-r--r--  drivers/net/ethernet/broadcom/unimac.h             |  68
23 files changed, 1821 insertions(+), 574 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7b79528d6eed..f8a168b73307 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -51,6 +51,14 @@ config B44_PCI
depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
default y
+config BCM4908_ENET
+ tristate "Broadcom BCM4908 internal mac support"
+ depends on ARCH_BCM4908 || COMPILE_TEST
+ default y
+ help
+ This driver supports the Ethernet controller integrated into the
+ Broadcom BCM4908 family of SoCs.
+
config BCM63XX_ENET
tristate "Broadcom 63xx internal mac support"
depends on BCM63XX
@@ -174,7 +182,6 @@ config BGMAC_BCMA
config BGMAC_PLATFORM
tristate "Broadcom iProc GBit platform support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on OF
select BGMAC
select PHYLIB
select FIXED_PHY
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 7046ad6d3d0e..0ddfb5b5d53c 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM4908_ENET) += bcm4908_enet.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_BCMGENET) += genet/
obj-$(CONFIG_BNX2) += bnx2.o
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
new file mode 100644
index 000000000000..9be33dc98072
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "bcm4908_enet.h"
+#include "unimac.h"
+
+#define ENET_DMA_CH_RX_CFG ENET_DMA_CH0_CFG
+#define ENET_DMA_CH_TX_CFG ENET_DMA_CH1_CFG
+#define ENET_DMA_CH_RX_STATE_RAM ENET_DMA_CH0_STATE_RAM
+#define ENET_DMA_CH_TX_STATE_RAM ENET_DMA_CH1_STATE_RAM
+
+#define ENET_TX_BDS_NUM 200
+#define ENET_RX_BDS_NUM 200
+#define ENET_RX_BDS_NUM_MAX 8192
+
+#define ENET_DMA_INT_DEFAULTS (ENET_DMA_CH_CFG_INT_DONE | \
+ ENET_DMA_CH_CFG_INT_NO_DESC | \
+ ENET_DMA_CH_CFG_INT_BUFF_DONE)
+#define ENET_DMA_MAX_BURST_LEN 8 /* in 64 bit words */
+
+#define ENET_MTU_MAX ETH_DATA_LEN /* Is it possible to support 2044? */
+#define BRCM_MAX_TAG_LEN 6
+#define ENET_MAX_ETH_OVERHEAD (ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
+ ETH_FCS_LEN + 4) /* 32 */
+
+struct bcm4908_enet_dma_ring_bd {
+ __le32 ctl;
+ __le32 addr;
+} __packed;
+
+struct bcm4908_enet_dma_ring_slot {
+ struct sk_buff *skb;
+ unsigned int len;
+ dma_addr_t dma_addr;
+};
+
+struct bcm4908_enet_dma_ring {
+ int is_tx;
+ int read_idx;
+ int write_idx;
+ int length;
+ u16 cfg_block;
+ u16 st_ram_block;
+
+ union {
+ void *cpu_addr;
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ };
+ dma_addr_t dma_addr;
+
+ struct bcm4908_enet_dma_ring_slot *slots;
+};
+
+struct bcm4908_enet {
+ struct device *dev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ void __iomem *base;
+
+ struct bcm4908_enet_dma_ring tx_ring;
+ struct bcm4908_enet_dma_ring rx_ring;
+};
+
+/***
+ * R/W ops
+ */
+
+static u32 enet_read(struct bcm4908_enet *enet, u16 offset)
+{
+ return readl(enet->base + offset);
+}
+
+static void enet_write(struct bcm4908_enet *enet, u16 offset, u32 value)
+{
+ writel(value, enet->base + offset);
+}
+
+static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
+{
+ u32 val;
+
+ WARN_ON(set & ~mask);
+
+ val = enet_read(enet, offset);
+ val = (val & ~mask) | (set & mask);
+ enet_write(enet, offset, val);
+}
+
+static void enet_set(struct bcm4908_enet *enet, u16 offset, u32 set)
+{
+ enet_maskset(enet, offset, set, set);
+}
+
+static u32 enet_umac_read(struct bcm4908_enet *enet, u16 offset)
+{
+ return enet_read(enet, ENET_UNIMAC + offset);
+}
+
+static void enet_umac_write(struct bcm4908_enet *enet, u16 offset, u32 value)
+{
+ enet_write(enet, ENET_UNIMAC + offset, value);
+}
+
+static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
+{
+ enet_set(enet, ENET_UNIMAC + offset, set);
+}
+
+/***
+ * Helpers
+ */
+
+static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+}
+
+static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+}
+
+static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+}
+
+static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
+{
+ enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
+}
+
+/***
+ * DMA
+ */
+
+static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ int size = ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ struct device *dev = enet->dev;
+
+ ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
+ if (!ring->cpu_addr)
+ return -ENOMEM;
+
+ if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
+ dev_err(dev, "Invalid DMA ring alignment\n");
+ goto err_free_buf_descs;
+ }
+
+ ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
+ if (!ring->slots)
+ goto err_free_buf_descs;
+
+ ring->read_idx = 0;
+ ring->write_idx = 0;
+
+ return 0;
+
+err_free_buf_descs:
+ dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+ return -ENOMEM;
+}
+
+static void bcm4908_enet_dma_free(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int size;
+
+ size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ if (rx_ring->cpu_addr)
+ dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
+ kfree(rx_ring->slots);
+
+ size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ if (tx_ring->cpu_addr)
+ dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
+ kfree(tx_ring->slots);
+}
+
+static int bcm4908_enet_dma_alloc(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int err;
+
+ tx_ring->length = ENET_TX_BDS_NUM;
+ tx_ring->is_tx = 1;
+ tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
+ tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
+ err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
+ if (err) {
+ dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
+ return err;
+ }
+
+ rx_ring->length = ENET_RX_BDS_NUM;
+ rx_ring->is_tx = 0;
+ rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
+ rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
+ err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
+ if (err) {
+ dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
+ bcm4908_enet_dma_free(enet);
+ return err;
+ }
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
+ int i;
+
+ /* Disable the DMA controller and channel */
+ for (i = 0; i < ARRAY_SIZE(rings); i++)
+ enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
+ enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);
+
+ /* Reset channels state */
+ for (i = 0; i < ARRAY_SIZE(rings); i++) {
+ struct bcm4908_enet_dma_ring *ring = rings[i];
+
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
+ }
+}
+
+static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
+{
+ struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
+ struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
+ struct device *dev = enet->dev;
+ u32 tmp;
+ int err;
+
+ slot->len = ENET_MTU_MAX + ENET_MAX_ETH_OVERHEAD;
+
+ slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
+ if (!slot->skb)
+ return -ENOMEM;
+
+ slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
+ err = dma_mapping_error(dev, slot->dma_addr);
+ if (err) {
+ dev_err(dev, "Failed to map DMA buffer: %d\n", err);
+ kfree_skb(slot->skb);
+ slot->skb = NULL;
+ return err;
+ }
+
+ tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp |= DMA_CTL_STATUS_OWN;
+ if (idx == enet->rx_ring.length - 1)
+ tmp |= DMA_CTL_STATUS_WRAP;
+ buf_desc->ctl = cpu_to_le32(tmp);
+ buf_desc->addr = cpu_to_le32(slot->dma_addr);
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
+ int reset_subch = ring->is_tx ? 1 : 0;
+
+ /* Reset the DMA channel */
+ enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
+ enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);
+
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
+
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
+ (uint32_t)ring->dma_addr);
+}
+
+static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct bcm4908_enet_dma_ring_slot *slot;
+ struct device *dev = enet->dev;
+ int i;
+
+ for (i = rx_ring->length - 1; i >= 0; i--) {
+ slot = &rx_ring->slots[i];
+ if (!slot->skb)
+ continue;
+ dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
+ kfree_skb(slot->skb);
+ slot->skb = NULL;
+ }
+}
+
+static int bcm4908_enet_dma_init(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int err;
+ int i;
+
+ for (i = 0; i < rx_ring->length; i++) {
+ err = bcm4908_enet_dma_alloc_rx_buf(enet, i);
+ if (err) {
+ dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
+ bcm4908_enet_dma_uninit(enet);
+ return err;
+ }
+ }
+
+ bcm4908_enet_dma_ring_init(enet, &enet->tx_ring);
+ bcm4908_enet_dma_ring_init(enet, &enet->rx_ring);
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_tx_ring_enable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908_enet_dma_tx_ring_disable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+}
+
+static void bcm4908_enet_dma_rx_ring_enable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ unsigned long deadline;
+ u32 tmp;
+
+ enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+
+ deadline = jiffies + usecs_to_jiffies(2000);
+ do {
+ tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
+ if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
+ return;
+ enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+ usleep_range(10, 30);
+ } while (!time_after_eq(jiffies, deadline));
+
+ dev_warn(enet->dev, "Timeout waiting for DMA RX stop\n");
+}
+
+/***
+ * Ethernet driver
+ */
+
+static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
+{
+ u32 cmd;
+
+ bcm4908_enet_set_mtu(enet, enet->netdev->mtu);
+
+ cmd = enet_umac_read(enet, UMAC_CMD);
+ enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
+ enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);
+
+ enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
+ enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);
+
+ enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
+ enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);
+
+ cmd = enet_umac_read(enet, UMAC_CMD);
+ cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
+ cmd &= ~CMD_TX_EN;
+ cmd &= ~CMD_RX_EN;
+ cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
+ enet_umac_write(enet, UMAC_CMD, cmd);
+
+ enet_maskset(enet, ENET_GMAC_STATUS,
+ ENET_GMAC_STATUS_ETH_SPEED_MASK |
+ ENET_GMAC_STATUS_HD |
+ ENET_GMAC_STATUS_AUTO_CFG_EN |
+ ENET_GMAC_STATUS_LINK_UP,
+ ENET_GMAC_STATUS_ETH_SPEED_1000 |
+ ENET_GMAC_STATUS_AUTO_CFG_EN |
+ ENET_GMAC_STATUS_LINK_UP);
+}
+
+static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
+{
+ struct bcm4908_enet *enet = dev_id;
+
+ bcm4908_enet_intrs_off(enet);
+ bcm4908_enet_intrs_ack(enet);
+
+ napi_schedule(&enet->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm4908_enet_open(struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct device *dev = enet->dev;
+ int err;
+
+ err = request_irq(netdev->irq, bcm4908_enet_irq_handler, 0, "enet", enet);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
+ return err;
+ }
+
+ bcm4908_enet_gmac_init(enet);
+ bcm4908_enet_dma_reset(enet);
+ bcm4908_enet_dma_init(enet);
+
+ enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
+
+ enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
+ enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
+ bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
+
+ napi_enable(&enet->napi);
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+
+ bcm4908_enet_intrs_ack(enet);
+ bcm4908_enet_intrs_on(enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_stop(struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ napi_disable(&enet->napi);
+
+ bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
+ bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
+
+ bcm4908_enet_dma_uninit(enet);
+
+ free_irq(enet->netdev->irq, enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring_slot *slot;
+ struct device *dev = enet->dev;
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ int free_buf_descs;
+ u32 tmp;
+
+ /* Free transmitted skbs */
+ while (ring->read_idx != ring->write_idx) {
+ buf_desc = &ring->buf_desc[ring->read_idx];
+ if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+ break;
+ slot = &ring->slots[ring->read_idx];
+
+ dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+ dev_kfree_skb(slot->skb);
+ if (++ring->read_idx == ring->length)
+ ring->read_idx = 0;
+ }
+
+ /* Don't use the last empty buf descriptor */
+ if (ring->read_idx <= ring->write_idx)
+ free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
+ else
+ free_buf_descs = ring->read_idx - ring->write_idx;
+ if (free_buf_descs < 2)
+ return NETDEV_TX_BUSY;
+
+ /* Hardware removes OWN bit after sending data */
+ buf_desc = &ring->buf_desc[ring->write_idx];
+ if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ slot = &ring->slots[ring->write_idx];
+ slot->skb = skb;
+ slot->len = skb->len;
+ slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, slot->dma_addr)))
+ return NETDEV_TX_BUSY;
+
+ tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp |= DMA_CTL_STATUS_OWN;
+ tmp |= DMA_CTL_STATUS_SOP;
+ tmp |= DMA_CTL_STATUS_EOP;
+ tmp |= DMA_CTL_STATUS_APPEND_CRC;
+ if (ring->write_idx + 1 == ring->length - 1)
+ tmp |= DMA_CTL_STATUS_WRAP;
+
+ buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
+ buf_desc->ctl = cpu_to_le32(tmp);
+
+ bcm4908_enet_dma_tx_ring_enable(enet, &enet->tx_ring);
+
+ if (++ring->write_idx == ring->length - 1)
+ ring->write_idx = 0;
+ enet->netdev->stats.tx_bytes += skb->len;
+ enet->netdev->stats.tx_packets++;
+
+ return NETDEV_TX_OK;
+}
+
+static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
+{
+ struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
+ struct device *dev = enet->dev;
+ int handled = 0;
+
+ while (handled < weight) {
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ struct bcm4908_enet_dma_ring_slot slot;
+ u32 ctl;
+ int len;
+ int err;
+
+ buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
+ ctl = le32_to_cpu(buf_desc->ctl);
+ if (ctl & DMA_CTL_STATUS_OWN)
+ break;
+
+ slot = enet->rx_ring.slots[enet->rx_ring.read_idx];
+
+ /* Provide new buffer before unpinning the old one */
+ err = bcm4908_enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
+ if (err)
+ break;
+
+ if (++enet->rx_ring.read_idx == enet->rx_ring.length)
+ enet->rx_ring.read_idx = 0;
+
+ len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+
+ if (len < ETH_ZLEN ||
+ (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
+ kfree_skb(slot.skb);
+ enet->netdev->stats.rx_dropped++;
+ break;
+ }
+
+ dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
+
+ skb_put(slot.skb, len - ETH_FCS_LEN);
+ slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
+ netif_receive_skb(slot.skb);
+
+ enet->netdev->stats.rx_packets++;
+ enet->netdev->stats.rx_bytes += len;
+ handled++;
+ }
+
+ if (handled < weight) {
+ napi_complete_done(napi, handled);
+ bcm4908_enet_intrs_on(enet);
+ }
+
+ return handled;
+}
+
+static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+
+ bcm4908_enet_set_mtu(enet, new_mtu);
+
+ return 0;
+}
+
+static const struct net_device_ops bcm4908_enet_netdev_ops = {
+ .ndo_open = bcm4908_enet_open,
+ .ndo_stop = bcm4908_enet_stop,
+ .ndo_start_xmit = bcm4908_enet_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = bcm4908_enet_change_mtu,
+};
+
+static int bcm4908_enet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct bcm4908_enet *enet;
+ int err;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(*enet));
+ if (!netdev)
+ return -ENOMEM;
+
+ enet = netdev_priv(netdev);
+ enet->dev = dev;
+ enet->netdev = netdev;
+
+ enet->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(enet->base)) {
+ dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
+ return PTR_ERR(enet->base);
+ }
+
+ netdev->irq = platform_get_irq_byname(pdev, "rx");
+ if (netdev->irq < 0)
+ return netdev->irq;
+
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+ err = bcm4908_enet_dma_alloc(enet);
+ if (err)
+ return err;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ eth_hw_addr_random(netdev);
+ netdev->netdev_ops = &bcm4908_enet_netdev_ops;
+ netdev->min_mtu = ETH_ZLEN;
+ netdev->mtu = ETH_DATA_LEN;
+ netdev->max_mtu = ENET_MTU_MAX;
+ netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);
+
+ err = register_netdev(netdev);
+ if (err) {
+ bcm4908_enet_dma_free(enet);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_remove(struct platform_device *pdev)
+{
+ struct bcm4908_enet *enet = platform_get_drvdata(pdev);
+
+ unregister_netdev(enet->netdev);
+ netif_napi_del(&enet->napi);
+ bcm4908_enet_dma_free(enet);
+
+ return 0;
+}
+
+static const struct of_device_id bcm4908_enet_of_match[] = {
+ { .compatible = "brcm,bcm4908-enet"},
+ {},
+};
+
+static struct platform_driver bcm4908_enet_driver = {
+ .driver = {
+ .name = "bcm4908_enet",
+ .of_match_table = bcm4908_enet_of_match,
+ },
+ .probe = bcm4908_enet_probe,
+ .remove = bcm4908_enet_remove,
+};
+module_platform_driver(bcm4908_enet_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
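The TX and RX paths above talk to the DMA engine purely through each descriptor's ctl word: a length field plus the OWN/SOP/EOP/WRAP/APPEND_CRC status bits that bcm4908_enet.h defines below. A minimal sketch of composing and decoding such a word, assuming only those definitions (the helper names are illustrative, not part of the patch):

#include <linux/types.h>
#include "bcm4908_enet.h"

/* Illustration only: build a TX descriptor ctl word the way
 * bcm4908_enet_start_xmit() does. Helper names are hypothetical.
 */
static u32 example_tx_ctl(unsigned int len, bool last_in_ring)
{
	u32 ctl = len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;

	ctl |= DMA_CTL_STATUS_OWN;			/* hand descriptor to hardware */
	ctl |= DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP;	/* packet fits in one buffer */
	ctl |= DMA_CTL_STATUS_APPEND_CRC;		/* MAC appends the FCS */
	if (last_in_ring)
		ctl |= DMA_CTL_STATUS_WRAP;		/* DMA wraps back to descriptor 0 */

	return ctl;
}

/* Decode the length back out, as the RX poll loop does. */
static unsigned int example_ctl_len(u32 ctl)
{
	return (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
}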
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.h b/drivers/net/ethernet/broadcom/bcm4908_enet.h
new file mode 100644
index 000000000000..8a3ede2da537
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __BCM4908_ENET_H
+#define __BCM4908_ENET_H
+
+#define ENET_CONTROL 0x000
+#define ENET_MIB_CTRL 0x004
+#define ENET_MIB_CTRL_CLR_MIB 0x00000001
+#define ENET_RX_ERR_MASK 0x008
+#define ENET_MIB_MAX_PKT_SIZE 0x00C
+#define ENET_MIB_MAX_PKT_SIZE_VAL 0x00003fff
+#define ENET_DIAG_OUT 0x01c
+#define ENET_ENABLE_DROP_PKT 0x020
+#define ENET_IRQ_ENABLE 0x024
+#define ENET_IRQ_ENABLE_OVFL 0x00000001
+#define ENET_GMAC_STATUS 0x028
+#define ENET_GMAC_STATUS_ETH_SPEED_MASK 0x00000003
+#define ENET_GMAC_STATUS_ETH_SPEED_10 0x00000000
+#define ENET_GMAC_STATUS_ETH_SPEED_100 0x00000001
+#define ENET_GMAC_STATUS_ETH_SPEED_1000 0x00000002
+#define ENET_GMAC_STATUS_HD 0x00000004
+#define ENET_GMAC_STATUS_AUTO_CFG_EN 0x00000008
+#define ENET_GMAC_STATUS_LINK_UP 0x00000010
+#define ENET_IRQ_STATUS 0x02c
+#define ENET_IRQ_STATUS_OVFL 0x00000001
+#define ENET_OVERFLOW_COUNTER 0x030
+#define ENET_FLUSH 0x034
+#define ENET_FLUSH_RXFIFO_FLUSH 0x00000001
+#define ENET_FLUSH_TXFIFO_FLUSH 0x00000002
+#define ENET_RSV_SELECT 0x038
+#define ENET_BP_FORCE 0x03c
+#define ENET_BP_FORCE_FORCE 0x00000001
+#define ENET_DMA_RX_OK_TO_SEND_COUNT 0x040
+#define ENET_DMA_RX_OK_TO_SEND_COUNT_VAL 0x0000000f
+#define ENET_TX_CRC_CTRL 0x044
+#define ENET_MIB 0x200
+#define ENET_UNIMAC 0x400
+#define ENET_DMA 0x800
+#define ENET_DMA_CONTROLLER_CFG 0x800
+#define ENET_DMA_CTRL_CFG_MASTER_EN 0x00000001
+#define ENET_DMA_CTRL_CFG_FLOWC_CH1_EN 0x00000002
+#define ENET_DMA_CTRL_CFG_FLOWC_CH3_EN 0x00000004
+#define ENET_DMA_FLOWCTL_CH1_THRESH_LO 0x804
+#define ENET_DMA_FLOWCTL_CH1_THRESH_HI 0x808
+#define ENET_DMA_FLOWCTL_CH1_ALLOC 0x80c
+#define ENET_DMA_FLOWCTL_CH1_ALLOC_FORCE 0x80000000
+#define ENET_DMA_FLOWCTL_CH3_THRESH_LO 0x810
+#define ENET_DMA_FLOWCTL_CH3_THRESH_HI 0x814
+#define ENET_DMA_FLOWCTL_CH3_ALLOC 0x818
+#define ENET_DMA_FLOWCTL_CH5_THRESH_LO 0x81C
+#define ENET_DMA_FLOWCTL_CH5_THRESH_HI 0x820
+#define ENET_DMA_FLOWCTL_CH5_ALLOC 0x824
+#define ENET_DMA_FLOWCTL_CH7_THRESH_LO 0x828
+#define ENET_DMA_FLOWCTL_CH7_THRESH_HI 0x82C
+#define ENET_DMA_FLOWCTL_CH7_ALLOC 0x830
+#define ENET_DMA_CTRL_CHANNEL_RESET 0x834
+#define ENET_DMA_CTRL_CHANNEL_DEBUG 0x838
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_STATUS 0x840
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_MASK 0x844
+#define ENET_DMA_CH0_CFG 0xa00 /* RX */
+#define ENET_DMA_CH1_CFG 0xa10 /* TX */
+#define ENET_DMA_CH0_STATE_RAM 0xc00 /* RX */
+#define ENET_DMA_CH1_STATE_RAM 0xc10 /* TX */
+
+#define ENET_DMA_CH_CFG 0x00 /* assorted configuration */
+#define ENET_DMA_CH_CFG_ENABLE 0x00000001 /* set to enable channel */
+#define ENET_DMA_CH_CFG_PKT_HALT 0x00000002 /* idle after an EOP flag is detected */
+#define ENET_DMA_CH_CFG_BURST_HALT 0x00000004 /* idle after finish current memory burst */
+#define ENET_DMA_CH_CFG_INT_STAT 0x04 /* interrupts control and status */
+#define ENET_DMA_CH_CFG_INT_MASK 0x08 /* interrupts mask */
+#define ENET_DMA_CH_CFG_INT_BUFF_DONE 0x00000001 /* buffer done */
+#define ENET_DMA_CH_CFG_INT_DONE 0x00000002 /* packet xfer complete */
+#define ENET_DMA_CH_CFG_INT_NO_DESC 0x00000004 /* no valid descriptors */
+#define ENET_DMA_CH_CFG_INT_RX_ERROR 0x00000008 /* rxdma detect client protocol error */
+#define ENET_DMA_CH_CFG_MAX_BURST 0x0c /* max burst length permitted */
+#define ENET_DMA_CH_CFG_MAX_BURST_DESCSIZE_SEL 0x00040000 /* DMA Descriptor Size Selection */
+#define ENET_DMA_CH_CFG_SIZE 0x10
+
+#define ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR 0x00 /* descriptor ring start address */
+#define ENET_DMA_CH_STATE_RAM_STATE_DATA 0x04 /* state/bytes done/ring offset */
+#define ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS 0x08 /* buffer descriptor status and len */
+#define ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR 0x0c /* buffer descriptor currently processed */
+#define ENET_DMA_CH_STATE_RAM_SIZE 0x10
+
+#define DMA_CTL_STATUS_APPEND_CRC 0x00000100
+#define DMA_CTL_STATUS_APPEND_BRCM_TAG 0x00000200
+#define DMA_CTL_STATUS_PRIO 0x00000C00 /* Prio for Tx */
+#define DMA_CTL_STATUS_WRAP 0x00001000 /* last descriptor in ring */
+#define DMA_CTL_STATUS_SOP 0x00002000 /* first buffer in packet */
+#define DMA_CTL_STATUS_EOP 0x00004000 /* last buffer in packet */
+#define DMA_CTL_STATUS_OWN 0x00008000 /* cleared by DMA, set by SW */
+#define DMA_CTL_LEN_DESC_BUFLENGTH 0x0fff0000
+#define DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT 16
+#define DMA_CTL_LEN_DESC_MULTICAST 0x40000000
+#define DMA_CTL_LEN_DESC_USEFPM 0x80000000
+
+#endif
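The header lays the DMA registers out as blocks: each channel gets a config block (ENET_DMA_CH0_CFG for RX, ENET_DMA_CH1_CFG for TX) and a matching state-RAM block, and the per-channel registers (ENET_DMA_CH_CFG, ..._INT_MASK, and so on) are offsets within the block. A sketch of that addressing against an ioremapped base (function name hypothetical):

#include <linux/io.h>
#include "bcm4908_enet.h"

/* Illustration only: per-channel register address = block offset + register
 * offset. ch 0 is the RX channel, ch 1 the TX channel.
 */
static u32 example_ch_reg_read(void __iomem *base, unsigned int ch, u16 reg)
{
	u16 block = ch ? ENET_DMA_CH1_CFG : ENET_DMA_CH0_CFG;

	return readl(base + block + reg);	/* e.g. reg = ENET_DMA_CH_CFG_INT_MASK */
}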
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 916824cca3fd..fd8767213165 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -220,7 +220,7 @@ static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
/*
* refill rx queue
*/
-static int bcm_enet_refill_rx(struct net_device *dev)
+static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
struct bcm_enet_priv *priv;
@@ -228,26 +228,29 @@ static int bcm_enet_refill_rx(struct net_device *dev)
while (priv->rx_desc_count < priv->rx_ring_size) {
struct bcm_enet_desc *desc;
- struct sk_buff *skb;
- dma_addr_t p;
int desc_idx;
u32 len_stat;
desc_idx = priv->rx_dirty_desc;
desc = &priv->rx_desc_cpu[desc_idx];
- if (!priv->rx_skb[desc_idx]) {
- skb = netdev_alloc_skb(dev, priv->rx_skb_size);
- if (!skb)
+ if (!priv->rx_buf[desc_idx]) {
+ void *buf;
+
+ if (likely(napi_mode))
+ buf = napi_alloc_frag(priv->rx_frag_size);
+ else
+ buf = netdev_alloc_frag(priv->rx_frag_size);
+ if (unlikely(!buf))
break;
- priv->rx_skb[desc_idx] = skb;
- p = dma_map_single(&priv->pdev->dev, skb->data,
- priv->rx_skb_size,
- DMA_FROM_DEVICE);
- desc->address = p;
+ priv->rx_buf[desc_idx] = buf;
+ desc->address = dma_map_single(&priv->pdev->dev,
+ buf + priv->rx_buf_offset,
+ priv->rx_buf_size,
+ DMA_FROM_DEVICE);
}
- len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+ len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
len_stat |= DMADESC_OWNER_MASK;
if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
@@ -287,7 +290,7 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, false);
spin_unlock(&priv->rx_lock);
}
@@ -297,10 +300,12 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
struct bcm_enet_priv *priv;
+ struct list_head rx_list;
struct device *kdev;
int processed;
priv = netdev_priv(dev);
+ INIT_LIST_HEAD(&rx_list);
kdev = &priv->pdev->dev;
processed = 0;
@@ -315,6 +320,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
int desc_idx;
u32 len_stat;
unsigned int len;
+ void *buf;
desc_idx = priv->rx_curr_desc;
desc = &priv->rx_desc_cpu[desc_idx];
@@ -333,7 +339,6 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_curr_desc++;
if (priv->rx_curr_desc == priv->rx_ring_size)
priv->rx_curr_desc = 0;
- priv->rx_desc_count--;
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
@@ -360,16 +365,14 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
/* valid packet */
- skb = priv->rx_skb[desc_idx];
+ buf = priv->rx_buf[desc_idx];
len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
/* don't include FCS */
len -= 4;
if (len < copybreak) {
- struct sk_buff *nskb;
-
- nskb = napi_alloc_skb(&priv->napi, len);
- if (!nskb) {
+ skb = napi_alloc_skb(&priv->napi, len);
+ if (unlikely(!skb)) {
/* forget packet, just rearm desc */
dev->stats.rx_dropped++;
continue;
@@ -377,26 +380,36 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
dma_sync_single_for_cpu(kdev, desc->address,
len, DMA_FROM_DEVICE);
- memcpy(nskb->data, skb->data, len);
+ memcpy(skb->data, buf + priv->rx_buf_offset, len);
dma_sync_single_for_device(kdev, desc->address,
len, DMA_FROM_DEVICE);
- skb = nskb;
} else {
- dma_unmap_single(&priv->pdev->dev, desc->address,
- priv->rx_skb_size, DMA_FROM_DEVICE);
- priv->rx_skb[desc_idx] = NULL;
+ dma_unmap_single(kdev, desc->address,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ priv->rx_buf[desc_idx] = NULL;
+
+ skb = build_skb(buf, priv->rx_frag_size);
+ if (unlikely(!skb)) {
+ skb_free_frag(buf);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, priv->rx_buf_offset);
}
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- netif_receive_skb(skb);
+ list_add_tail(&skb->list, &rx_list);
- } while (--budget > 0);
+ } while (processed < budget);
+
+ netif_receive_skb_list(&rx_list);
+ priv->rx_desc_count -= processed;
if (processed || !priv->rx_desc_count) {
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, true);
/* kick rx dma */
enet_dmac_writel(priv, priv->dma_chan_en_mask,
@@ -413,9 +426,11 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
struct bcm_enet_priv *priv;
+ unsigned int bytes;
int released;
priv = netdev_priv(dev);
+ bytes = 0;
released = 0;
while (priv->tx_desc_count < priv->tx_ring_size) {
@@ -452,10 +467,13 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
if (desc->len_stat & DMADESC_UNDER_MASK)
dev->stats.tx_errors++;
+ bytes += skb->len;
dev_kfree_skb(skb);
released++;
}
+ netdev_completed_queue(dev, released, bytes);
+
if (netif_queue_stopped(dev) && released)
netif_wake_queue(dev);
@@ -622,8 +640,11 @@ bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc->len_stat = len_stat;
wmb();
+ netdev_sent_queue(dev, skb->len);
+
/* kick tx dma */
- enet_dmac_writel(priv, priv->dma_chan_en_mask,
+ if (!netdev_xmit_more() || !priv->tx_desc_count)
+ enet_dmac_writel(priv, priv->dma_chan_en_mask,
ENETDMAC_CHANCFG, priv->tx_chan);
/* stop queue if no more desc available */
@@ -845,6 +866,24 @@ static void bcm_enet_adjust_link(struct net_device *dev)
priv->pause_tx ? "tx" : "off");
}
+static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct bcm_enet_desc *desc;
+
+ if (!priv->rx_buf[i])
+ continue;
+
+ desc = &priv->rx_desc_cpu[i];
+ dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
+ DMA_FROM_DEVICE);
+ skb_free_frag(priv->rx_buf[i]);
+ }
+ kfree(priv->rx_buf);
+}
+
/*
* open callback, allocate dma rings & buffers and start rx operation
*/
@@ -954,10 +993,10 @@ static int bcm_enet_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
+ if (!priv->rx_buf) {
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -974,8 +1013,8 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMAC_BUFALLOC, priv->rx_chan);
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -1069,18 +1108,7 @@ static int bcm_enet_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -1159,12 +1187,12 @@ static int bcm_enet_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
@@ -1186,21 +1214,10 @@ static int bcm_enet_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -1622,9 +1639,12 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
* align rx buffer size to dma burst len, account FCS since
* it's appended
*/
- priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+ priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
priv->dma_maxburst * 4);
+ priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
dev->mtu = new_mtu;
return 0;
}
@@ -1709,6 +1729,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->enet_is_sw = false;
priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD;
ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
@@ -2126,7 +2147,7 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ dev_err(kdev, "cannot allocate tx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
@@ -2136,11 +2157,11 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (!priv->rx_buf) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -2187,8 +2208,8 @@ static int bcm_enetsw_open(struct net_device *dev)
enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMA_BUFALLOC_REG(priv->rx_chan));
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -2287,18 +2308,7 @@ static int bcm_enetsw_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -2327,13 +2337,13 @@ static int bcm_enetsw_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
del_timer_sync(&priv->swphy_poll);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
napi_disable(&priv->napi);
del_timer_sync(&priv->rx_timeout);
@@ -2348,21 +2358,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -2659,6 +2658,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
pd = dev_get_platdata(&pdev->dev);
if (pd) {
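The RX rework above sizes each page fragment so that build_skb() can wrap it in place: headroom plus the DMA-visible area, rounded, with room left for the skb_shared_info tail. A minimal sketch of that arithmetic using the same helpers the patch calls (the function is illustrative):

#include <linux/skbuff.h>

/* Illustration only: fragment sizing for build_skb(), mirroring the
 * rx_frag_size computation in bcm_enet_change_mtu().
 */
static unsigned int example_rx_frag_size(unsigned int buf_offset,
					 unsigned int buf_size)
{
	/* headroom + DMA-visible buffer, then room for skb_shared_info */
	return SKB_DATA_ALIGN(buf_offset + buf_size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}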
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 1d3c917eb830..78f1830fb3cb 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -230,11 +230,17 @@ struct bcm_enet_priv {
/* next dirty rx descriptor to refill */
int rx_dirty_desc;
- /* size of allocated rx skbs */
- unsigned int rx_skb_size;
+ /* size of allocated rx buffers */
+ unsigned int rx_buf_size;
- /* list of skb given to hw for rx */
- struct sk_buff **rx_skb;
+ /* allocated rx buffer offset */
+ unsigned int rx_buf_offset;
+
+ /* size of allocated rx frag */
+ unsigned int rx_frag_size;
+
+ /* list of buffer given to hw for rx */
+ void **rx_buf;
/* used when rx skb allocation failed, so we defer rx queue
* refill */
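The bcm63xx changes also wire the TX path into byte queue limits: bytes are reported with netdev_sent_queue() at transmit, returned with netdev_completed_queue() at reclaim, and the counters are cleared with netdev_reset_queue() on stop. A sketch of that pattern in isolation (callbacks are hypothetical, not the patch itself):

#include <linux/netdevice.h>

/* Illustration only: the BQL accounting pattern the patch adopts. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... place skb on the hardware TX ring here ... */

	netdev_sent_queue(dev, len);		/* bytes handed to hardware */
	return NETDEV_TX_OK;
}

static void example_tx_reclaim(struct net_device *dev,
			       unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);	/* may wake the queue */
}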
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0fdd19d99d99..777bbf6d2586 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -2310,33 +2311,22 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_select_queue = bcm_sysport_select_queue,
};
-static int bcm_sysport_map_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_map_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
unsigned int q, qp, port;
- struct net_device *dev;
-
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
/* We can't be setting up queue inspection for non directly attached
* switches
*/
- if (info->switch_number)
+ if (dp->ds->index)
return 0;
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
* 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
@@ -2376,27 +2366,16 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_unmap_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_unmap_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
- struct net_device *dev;
unsigned int q, qp, port;
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
-
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
num_tx_queues = slave_dev->real_num_tx_queues;
@@ -2417,17 +2396,30 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int bcm_sysport_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
- int ret = NOTIFY_DONE;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct bcm_sysport_priv *priv;
+ int ret = 0;
+
+ priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
+ if (priv->netdev != dev)
+ return NOTIFY_DONE;
switch (event) {
- case DSA_PORT_REGISTER:
- ret = bcm_sysport_map_queues(nb, ptr);
- break;
- case DSA_PORT_UNREGISTER:
- ret = bcm_sysport_unmap_queues(nb, ptr);
+ case NETDEV_CHANGEUPPER:
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return NOTIFY_DONE;
+
+ if (!dsa_slave_dev_check(info->upper_dev))
+ return NOTIFY_DONE;
+
+ if (info->linking)
+ ret = bcm_sysport_map_queues(dev, info->upper_dev);
+ else
+ ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
break;
}
@@ -2503,8 +2495,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_free_netdev;
+ }
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
@@ -2577,6 +2571,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
@@ -2599,9 +2594,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rx_max_coalesced_frames = 1;
u64_stats_init(&priv->syncp);
- priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+ priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
- ret = register_dsa_notifier(&priv->dsa_notifier);
+ ret = register_netdevice_notifier(&priv->netdev_notifier);
if (ret) {
dev_err(&pdev->dev, "failed to register netdevice notifier\n");
goto err_deregister_fixed_link;
@@ -2628,7 +2623,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
err_deregister_notifier:
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
@@ -2646,7 +2641,7 @@ static int bcm_sysport_remove(struct platform_device *pdev)
/* Not much to do, ndo_close has been called
* and we use managed allocations
*/
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
unregister_netdev(dev);
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
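The notifier rework above replaces the DSA-specific notifier with the generic netdevice notifier chain: a NETDEV_CHANGEUPPER event fires when a DSA port is linked to (or unlinked from) its master, and info->linking selects map versus unmap. A minimal sketch of the shape of such a handler, assuming the same helpers the patch uses (names are illustrative):

#include <linux/netdevice.h>
#include <net/dsa.h>

/* Illustration only: skeleton of the NETDEV_CHANGEUPPER handling the patch
 * introduces; the map/unmap work is a stand-in for the driver's own.
 */
static int example_netdevice_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	int err = 0;

	if (event != NETDEV_CHANGEUPPER)
		return NOTIFY_DONE;
	if (!dsa_slave_dev_check(info->upper_dev))
		return NOTIFY_DONE;	/* only react to DSA ports */

	/* info->linking: true when dev gains the upper, false when it
	 * loses it; map or unmap queues here and set err accordingly.
	 */

	return notifier_from_errno(err);
}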
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 3a5cb6f128f5..984f76e74b43 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/dim.h>
+#include "unimac.h"
+
/* Receive/transmit descriptor format */
#define DESC_ADDR_HI_STATUS_LEN 0x00
#define DESC_ADDR_HI_SHIFT 0
@@ -213,39 +215,6 @@ struct bcm_rsb {
/* UniMAC offset and defines */
#define SYS_PORT_UMAC_OFFSET 0x800
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_10 0
-#define CMD_SPEED_100 1
-#define CMD_SPEED_1000 2
-#define CMD_SPEED_2500 3
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00c
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
@@ -787,7 +756,7 @@ struct bcm_sysport_priv {
struct u64_stats_sync syncp;
/* map information between switch port queues and local queues */
- struct notifier_block dsa_notifier;
+ struct notifier_block netdev_notifier;
unsigned int per_port_num_tx_queues;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 98ec1b8a7d8e..075f6e146b29 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -746,25 +746,25 @@ error:
/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
* there is nothing to change? Try it after stabilizing the driver.
*/
-static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
- bool force)
+static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+ bool force)
{
- u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ u32 cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
u32 new_val = (cmdcfg & mask) | set;
u32 cmdcfg_sr;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~0, cmdcfg_sr);
udelay(2);
if (new_val != cmdcfg || force)
- bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+ bgmac_umac_write(bgmac, UMAC_CMD, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~cmdcfg_sr, 0);
udelay(2);
}
@@ -773,9 +773,9 @@ static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
u32 tmp;
tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
- bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC0, tmp);
tmp = (addr[4] << 8) | addr[5];
- bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC1, tmp);
}
static void bgmac_set_rx_mode(struct net_device *net_dev)
@@ -783,9 +783,9 @@ static void bgmac_set_rx_mode(struct net_device *net_dev)
struct bgmac *bgmac = netdev_priv(net_dev);
if (net_dev->flags & IFF_PROMISC)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_PROMISC, true);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_PROMISC, 0, true);
}
#if 0 /* We don't use that regs yet */
@@ -825,21 +825,21 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
- u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+ u32 mask = ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT | CMD_HD_EN);
u32 set = 0;
switch (bgmac->mac_speed) {
case SPEED_10:
- set |= BGMAC_CMDCFG_ES_10;
+ set |= CMD_SPEED_10 << CMD_SPEED_SHIFT;
break;
case SPEED_100:
- set |= BGMAC_CMDCFG_ES_100;
+ set |= CMD_SPEED_100 << CMD_SPEED_SHIFT;
break;
case SPEED_1000:
- set |= BGMAC_CMDCFG_ES_1000;
+ set |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
break;
case SPEED_2500:
- set |= BGMAC_CMDCFG_ES_2500;
+ set |= CMD_SPEED_2500 << CMD_SPEED_SHIFT;
break;
default:
dev_err(bgmac->dev, "Unsupported speed: %d\n",
@@ -847,9 +847,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
}
if (bgmac->mac_duplex == DUPLEX_HALF)
- set |= BGMAC_CMDCFG_HD;
+ set |= CMD_HD_EN;
- bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+ bgmac_umac_cmd_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
@@ -917,7 +917,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
udelay(1);
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
@@ -986,34 +986,34 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
- * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
- * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
+ * Specs don't say about using UMAC_CMD_SR, but in this routine
+ * UMAC_CMD is read _after_ putting chip in a reset. So it has to
* be kept until taking the MAC out of reset.
*/
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
-
- bgmac_cmdcfg_maskset(bgmac,
- ~(BGMAC_CMDCFG_TE |
- BGMAC_CMDCFG_RE |
- BGMAC_CMDCFG_RPI |
- BGMAC_CMDCFG_TAI |
- BGMAC_CMDCFG_HD |
- BGMAC_CMDCFG_ML |
- BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_RL |
- BGMAC_CMDCFG_RED |
- BGMAC_CMDCFG_PE |
- BGMAC_CMDCFG_TPI |
- BGMAC_CMDCFG_PAD_EN |
- BGMAC_CMDCFG_PF),
- BGMAC_CMDCFG_PROM |
- BGMAC_CMDCFG_NLC |
- BGMAC_CMDCFG_CFE |
- cmdcfg_sr,
- false);
+ cmdcfg_sr = CMD_SW_RESET_OLD;
+
+ bgmac_umac_cmd_maskset(bgmac,
+ ~(CMD_TX_EN |
+ CMD_RX_EN |
+ CMD_RX_PAUSE_IGNORE |
+ CMD_TX_ADDR_INS |
+ CMD_HD_EN |
+ CMD_LCL_LOOP_EN |
+ CMD_CNTL_FRM_EN |
+ CMD_RMT_LOOP_EN |
+ CMD_RX_ERR_DISC |
+ CMD_PRBL_EN |
+ CMD_TX_PAUSE_IGNORE |
+ CMD_PAD_EN |
+ CMD_PAUSE_FWD),
+ CMD_PROMISC |
+ CMD_NO_LEN_CHK |
+ CMD_CNTL_FRM_EN |
+ cmdcfg_sr,
+ false);
bgmac->mac_speed = SPEED_UNKNOWN;
bgmac->mac_duplex = DUPLEX_UNKNOWN;
@@ -1049,16 +1049,16 @@ static void bgmac_enable(struct bgmac *bgmac)
u32 mode;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
- bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- cmdcfg_sr, true);
+ cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
+ bgmac_umac_cmd_maskset(bgmac, ~(CMD_TX_EN | CMD_RX_EN),
+ cmdcfg_sr, true);
udelay(2);
- cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
- bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+ cmdcfg |= CMD_TX_EN | CMD_RX_EN;
+ bgmac_umac_write(bgmac, UMAC_CMD, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
@@ -1078,7 +1078,7 @@ static void bgmac_enable(struct bgmac *bgmac)
fl_ctl = 0x03cb04cb;
bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
- bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+ bgmac_umac_write(bgmac, UMAC_PAUSE_CTRL, 0x27fff);
}
if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
@@ -1105,18 +1105,18 @@ static void bgmac_chip_init(struct bgmac *bgmac)
bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
/* Enable 802.3x tx flow control (honor received PAUSE frames) */
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_RX_PAUSE_IGNORE, 0, true);
bgmac_set_rx_mode(bgmac->net_dev);
bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
if (bgmac->loopback)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_LCL_LOOP_EN, 0, false);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + ETHER_MAX_LEN);
bgmac_chip_intrs_on(bgmac);
@@ -1252,7 +1252,7 @@ static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + mtu);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 351c598a3ec6..110088e662ea 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -4,6 +4,8 @@
#include <linux/netdevice.h>
+#include "unimac.h"
+
#define BGMAC_DEV_CTL 0x000
#define BGMAC_DC_TSM 0x00000002
#define BGMAC_DC_CFCO 0x00000004
@@ -169,47 +171,7 @@
#define BGMAC_RX_NONPAUSE_PKTS 0x420
#define BGMAC_RX_SACHANGES 0x424
#define BGMAC_RX_UNI_PKTS 0x428
-#define BGMAC_UNIMAC_VERSION 0x800
-#define BGMAC_HDBKP_CTL 0x804
-#define BGMAC_CMDCFG 0x808 /* Configuration */
-#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
-#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
-#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */
-#define BGMAC_CMDCFG_ES_10 0x00000000
-#define BGMAC_CMDCFG_ES_100 0x00000004
-#define BGMAC_CMDCFG_ES_1000 0x00000008
-#define BGMAC_CMDCFG_ES_2500 0x0000000C
-#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
-#define BGMAC_CMDCFG_PAD_EN 0x00000020
-#define BGMAC_CMDCFG_CF 0x00000040
-#define BGMAC_CMDCFG_PF 0x00000080
-#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
-#define BGMAC_CMDCFG_TAI 0x00000200
-#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
-#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
-#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
-#define BGMAC_CMDCFG_AE 0x00400000
-#define BGMAC_CMDCFG_CFE 0x00800000
-#define BGMAC_CMDCFG_NLC 0x01000000
-#define BGMAC_CMDCFG_RL 0x02000000
-#define BGMAC_CMDCFG_RED 0x04000000
-#define BGMAC_CMDCFG_PE 0x08000000
-#define BGMAC_CMDCFG_TPI 0x10000000
-#define BGMAC_CMDCFG_AT 0x20000000
-#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
-#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
-#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
-#define BGMAC_PAUSEQUANTA 0x818
-#define BGMAC_MAC_MODE 0x844
-#define BGMAC_OUTERTAG 0x848
-#define BGMAC_INNERTAG 0x84c
-#define BGMAC_TXIPG 0x85c
-#define BGMAC_PAUSE_CTL 0xb30
-#define BGMAC_TX_FLUSH 0xb34
-#define BGMAC_RX_STATUS 0xb38
-#define BGMAC_TX_STATUS 0xb3c
+#define BGMAC_UNIMAC 0x800
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
@@ -556,6 +518,16 @@ static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
bgmac->write(bgmac, offset, value);
}
+static inline u32 bgmac_umac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bgmac_read(bgmac, BGMAC_UNIMAC + offset);
+}
+
+static inline void bgmac_umac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bgmac_write(bgmac, BGMAC_UNIMAC + offset, value);
+}
+
static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset)
{
return bgmac->idm_read(bgmac, offset);
@@ -609,6 +581,11 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
bgmac_maskset(bgmac, offset, ~0, set);
}
+static inline void bgmac_umac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, u32 set)
+{
+ bgmac_maskset(bgmac, BGMAC_UNIMAC + offset, mask, set);
+}
+
static inline int bgmac_phy_connect(struct bgmac *bgmac)
{
return bgmac->phy_connect(bgmac);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 28069b290862..b652ed72a621 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13071,8 +13071,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4edd6f8e017e..a680fd9c68ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -255,7 +255,9 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+ ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
+ ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -1265,8 +1267,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
} else {
tpa_info->hash_type = PKT_HASH_TYPE_NONE;
tpa_info->gso_type = 0;
- if (netif_msg_rx_err(bp))
- netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
@@ -2021,10 +2022,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
- case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
- if (netif_msg_hw(bp))
- netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
- data1, data2);
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
+ char *fatal_str = "non-fatal";
+
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2036,14 +2036,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
- netdev_warn(bp->dev, "Firmware fatal reset event received\n");
+ fatal_str = "fatal";
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
- } else {
- netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
- bp->fw_reset_max_dsecs * 100);
}
+ netif_warn(bp, hw, bp->dev,
+ "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ fatal_str, data1, data2,
+ bp->fw_reset_min_dsecs * 100,
+ bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
break;
+ }
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -2052,16 +2055,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
- if (!fw_health->enabled)
+ if (!fw_health->enabled) {
+ netif_info(bp, drv, bp->dev,
+ "Error recovery info: error recovery[0]\n");
break;
-
- if (netif_msg_drv(bp))
- netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
- fw_health->enabled, fw_health->master,
- bnxt_fw_health_readl(bp,
- BNXT_FW_RESET_CNT_REG),
- bnxt_fw_health_readl(bp,
- BNXT_FW_HEALTH_REG));
+ }
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
@@ -2070,8 +2068,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ netif_info(bp, drv, bp->dev,
+ "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
+ fw_health->master, fw_health->last_fw_reset_cnt,
+ bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ goto async_event_process_exit;
case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
@@ -2094,6 +2101,20 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_sched_reset(bp, rxr);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ if (fw_health) {
+ fw_health->echo_req_data1 = data1;
+ fw_health->echo_req_data2 = data2;
+ set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
+ break;
+ }
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -2394,6 +2415,10 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
while (1) {
work_done += bnxt_poll_work(bp, cpr, budget - work_done);
@@ -2468,6 +2493,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
int work_done = 0;
u32 cons;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
if (cpr->has_more_work) {
cpr->has_more_work = 0;
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
@@ -2675,6 +2704,23 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
+static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+{
+ u8 init_val = mem_init->init_val;
+ u16 offset = mem_init->offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNXT_MEM_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += mem_init->size)
+ *(p2 + i + offset) = init_val;
+}
+
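bnxt_init_ctx_mem() generalizes the old whole-page memset: when firmware reports a per-entry offset, only one byte per fixed-size entry is written. A hypothetical example, all values assumed, for 16-byte entries whose byte 8 carries the initializer:

	struct bnxt_mem_init init = {
		.init_val = 0xff,
		.offset   = 8,	/* byte 8 of each entry */
		.size     = 16,	/* entry stride in bytes */
	};
	u8 page[4096];

	/* Writes 0xff at bytes 8, 24, 40, ... instead of memset()ing 4 KB */
	bnxt_init_ctx_mem(&init, page, sizeof(page));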
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -2734,9 +2780,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->init_val)
- memset(rmem->pg_arr[i], rmem->init_val,
- rmem->page_size);
+ if (rmem->mem_init)
+ bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -4272,6 +4318,9 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
{
int i;
+ if (!bp->irq_tbl)
+ return;
+
atomic_inc(&bp->intr_sem);
bnxt_disable_int(bp);
@@ -4425,6 +4474,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
+ /* Cap the timeout at HWRM_CMD_MAX_TIMEOUT (40 seconds) */
+ timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
@@ -6732,6 +6783,39 @@ func_qcfg_exit:
return rc;
}
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
+ struct hwrm_func_backing_store_qcaps_output *resp)
+{
+ struct bnxt_mem_init *mem_init;
+ u16 init_mask;
+ u8 init_val;
+ u8 *offset;
+ int i;
+
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+ offset = &resp->qp_init_offset;
+ mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
+ mem_init->init_val = init_val;
+ mem_init->offset = BNXT_MEM_INVALID_OFFSET;
+ if (!init_mask)
+ continue;
+ if (i == BNXT_CTX_MEM_INIT_STAT)
+ offset = &resp->stat_init_offset;
+ if (init_mask & (1 << i))
+ mem_init->offset = *offset * 4;
+ else
+ mem_init->init_val = 0;
+ }
+ ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+}
+
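For reference, the firmware-reported offsets are in 4-byte units, hence the *offset * 4 conversion above. A worked example with assumed values:

	/* ctx_init_mask has the SRQ bit set and resp->srq_init_offset is 2,
	 * so the SRQ initializer lands at byte 2 * 4 = 8 of every SRQ entry.
	 * With a zero ctx_init_mask the offset stays BNXT_MEM_INVALID_OFFSET
	 * and bnxt_init_ctx_mem() falls back to memset(), the old behavior.
	 */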
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_input req = {0};
@@ -6786,12 +6870,16 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
- ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+
+ bnxt_init_ctx_initializer(ctx, resp);
+
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + 1;
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@@ -6814,6 +6902,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
{
u8 pg_size = 0;
+ if (!rmem->nr_pages)
+ return;
+
if (BNXT_PAGE_SHIFT == 13)
pg_size = 1 << 4;
else if (BNXT_PAGE_SIZE == 16)
@@ -6843,6 +6934,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct hwrm_func_backing_store_cfg_input req = {0};
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ u32 req_len = sizeof(req);
__le32 *num_entries;
__le64 *pg_dir;
u32 flags = 0;
@@ -6853,6 +6945,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
if (!ctx)
return 0;
+ if (req_len > bp->hwrm_max_ext_req_len)
+ req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
req.enables = cpu_to_le32(enables);
@@ -6925,7 +7019,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@@ -6935,7 +7030,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
}
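Trimming req_len is safe because the new TQM ring8-ring10 blocks are appended at the end of the request (see the bnxt_hsi.h hunk below); firmware that only understands the legacy layout never sees them. The size relationship, for reference:

	/* hwrm_func_backing_store_cfg_input grew from 256 to 304 bytes in
	 * this HSI update. If bp->hwrm_max_ext_req_len cannot hold the new
	 * size, only the first BNXT_BACKING_STORE_CFG_LEGACY_LEN (256)
	 * bytes are sent, which is exactly the old request.
	 */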
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -6954,7 +7049,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, bool use_init_val)
+ u8 depth, struct bnxt_mem_init *mem_init)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -6992,8 +7087,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7008,8 +7102,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -7073,6 +7166,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
+ struct bnxt_mem_init *init;
u32 mem_size, ena, entries;
u32 entries_sp, min;
u32 num_mr, num_ah;
@@ -7100,39 +7194,54 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->qp_mem;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
extra_qps;
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->qp_entry_size) {
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->srq_mem;
ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->srq_entry_size) {
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->cq_mem;
ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->cq_entry_size) {
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->vnic_mem;
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->vnic_entry_size) {
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->stat_entry_size) {
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
@@ -7145,10 +7254,13 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
num_mr = 1024 * 256;
num_ah = 1024 * 128;
ctx_pg->entries = num_mr + num_ah;
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
- if (rc)
- return rc;
+ if (ctx->mrav_entry_size) {
+ mem_size = ctx->mrav_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
+ if (rc)
+ return rc;
+ }
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
if (ctx->mrav_num_entries_units)
ctx_pg->entries =
@@ -7157,10 +7269,12 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tim_entry_size) {
+ mem_size = ctx->tim_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
@@ -7174,10 +7288,13 @@ skip_rdma:
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = i ? entries : entries_sp;
- mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tqm_entry_size) {
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
+ NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
}
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
@@ -7435,9 +7552,22 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
- if (bp->fw_health)
- bp->fw_health->status_reliable = false;
- return;
+ if (!bp->chip_num) {
+ __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
+ bp->chip_num = readl(bp->bar0 +
+ BNXT_FW_HEALTH_WIN_BASE +
+ BNXT_GRC_REG_CHIP_NUM);
+ }
+ if (!BNXT_CHIP_P5(bp)) {
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+ return;
+ }
+ status_loc = BNXT_GRC_REG_STATUS_P5 |
+ BNXT_FW_HEALTH_REG_TYPE_BAR0;
+ } else {
+ status_loc = readl(hs + offsetof(struct hcomm_status,
+ fw_status_loc));
}
if (__bnxt_alloc_fw_health(bp)) {
@@ -7445,7 +7575,6 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
return;
}
- status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
@@ -8600,7 +8729,7 @@ msix_setup_exit:
static int bnxt_init_inta(struct bnxt *bp)
{
- bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
if (!bp->irq_tbl)
return -ENOMEM;
@@ -8808,7 +8937,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
{
int i;
- if (!bp->bnapi)
+ if (!bp->bnapi ||
+ test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8825,6 +8955,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
{
int i;
+ clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
@@ -8853,9 +8984,10 @@ void bnxt_tx_disable(struct bnxt *bp)
txr->dev_state = BNXT_DEV_STATE_CLOSING;
}
}
+ /* Drop carrier first to prevent TX timeout */
+ netif_carrier_off(bp->dev);
/* Stop all TX queues */
netif_tx_disable(bp->dev);
- netif_carrier_off(bp->dev);
}
void bnxt_tx_enable(struct bnxt *bp)
@@ -9331,13 +9463,60 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
static int bnxt_fw_init_one(struct bnxt *bp);
+static int bnxt_fw_reset_via_optee(struct bnxt *bp)
+{
+#ifdef CONFIG_TEE_BNXT_FW
+ int rc = tee_bnxt_fw_load();
+
+ if (rc)
+ netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
+
+ return rc;
+#else
+ netdev_err(bp->dev, "OP-TEE not supported\n");
+ return -ENODEV;
+#endif
+}
+
+static int bnxt_try_recover_fw(struct bnxt *bp)
+{
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ int retry = 0, rc;
+ u32 sts;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ do {
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (!sts || !BNXT_FW_IS_BOOTING(sts))
+ break;
+ retry++;
+ } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (!BNXT_FW_IS_HEALTHY(sts)) {
+ netdev_err(bp->dev,
+ "Firmware not responding, status: 0x%x\n",
+ sts);
+ rc = -ENODEV;
+ }
+ if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
+ netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
+ return bnxt_fw_reset_via_optee(bp);
+ }
+ return rc;
+ }
+
+ return -ENODEV;
+}
+
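bnxt_fw_reset_via_optee() is moved up and wrapped by the new bnxt_try_recover_fw() so the recovery logic can be shared by bnxt_hwrm_if_change() below and bnxt_fw_init_one_p1() later in this patch. Roughly, its outcomes are:

	/* - ver_get succeeds and the health register reads healthy: 0
	 * - firmware still booting: retry on -EBUSY up to BNXT_FW_RETRY times
	 * - status shows CRASHED_NO_MASTER: attempt a reload via OP-TEE
	 * - no reliable health register at all: -ENODEV
	 */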
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_if_change_input req = {0};
bool resc_reinit = false, fw_reset = false;
+ int rc, retry = 0;
u32 flags = 0;
- int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -9346,10 +9525,25 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (up)
req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ while (retry < BNXT_FW_IF_RETRY) {
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc != -EAGAIN)
+ break;
+
+ msleep(50);
+ retry++;
+ }
if (!rc)
flags = le32_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc == -EAGAIN)
+ return rc;
+ if (rc && up) {
+ rc = bnxt_try_recover_fw(bp);
+ fw_reset = true;
+ }
if (rc)
return rc;
@@ -9689,6 +9883,25 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
+static int bnxt_reinit_after_abort(struct bnxt *bp)
+{
+ int rc;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EBUSY;
+
+ rc = bnxt_fw_init_one(bp);
+ if (!rc) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (!rc) {
+ clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ }
+ }
+ return rc;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -9847,8 +10060,14 @@ static int bnxt_open(struct net_device *dev)
int rc;
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
- netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
- return -ENODEV;
+ rc = bnxt_reinit_after_abort(bp);
+ if (rc) {
+ if (rc == -EBUSY)
+ netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
+ else
+ netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
+ return -ENODEV;
+ }
}
rc = bnxt_hwrm_if_change(bp, true);
@@ -10785,11 +11004,23 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- /* When firmware is fatal state, disable PCI device to prevent
- * any potential bad DMAs before freeing kernel memory.
+ /* When firmware is in fatal state, quiesce device and disable
+ * bus master to prevent any potential bad DMAs before freeing
+ * kernel memory.
*/
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ u16 val = 0;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ if (val == 0xffff)
+ bp->fw_reset_min_dsecs = 0;
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
+ }
__bnxt_close_nic(bp, true, false);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
@@ -10994,6 +11225,17 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
+static void bnxt_fw_echo_reply(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct hwrm_func_echo_response_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
+ req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
+ req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
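All the pieces of the new echo handshake are in this patch; the flow, for reference:

	/* 1. bnxt_async_event_process() latches data1/data2 into bp->fw_health
	 *    and sets BNXT_FW_ECHO_REQUEST_SP_EVENT.
	 * 2. bnxt_sp_task() sees the bit and calls bnxt_fw_echo_reply().
	 * 3. bnxt_fw_echo_reply() sends HWRM_FUNC_ECHO_RESPONSE carrying the
	 *    same two words back, letting firmware confirm the driver is alive.
	 */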
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -11061,6 +11303,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
bnxt_chk_missed_irq(bp);
+ if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
+ bnxt_fw_echo_reply(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -11177,21 +11422,6 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
-static int bnxt_fw_reset_via_optee(struct bnxt *bp)
-{
-#ifdef CONFIG_TEE_BNXT_FW
- int rc = tee_bnxt_fw_load();
-
- if (rc)
- netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
-
- return rc;
-#else
- netdev_err(bp->dev, "OP-TEE not supported\n");
- return -ENODEV;
-#endif
-}
-
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -11200,19 +11430,10 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
rc = bnxt_hwrm_ver_get(bp);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
- if (bp->fw_health && bp->fw_health->status_reliable) {
- u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
-
- netdev_err(bp->dev,
- "Firmware not responding, status: 0x%x\n",
- sts);
- if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
- netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
- rc = bnxt_fw_reset_via_optee(bp);
- if (!rc)
- rc = bnxt_hwrm_ver_get(bp);
- }
- }
+ rc = bnxt_try_recover_fw(bp);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_ver_get(bp);
if (rc)
return rc;
}
@@ -11412,6 +11633,12 @@ static void bnxt_reset_all(struct bnxt *bp)
bp->fw_reset_timestamp = jiffies;
}
+static bool bnxt_fw_reset_timeout(struct bnxt *bp)
+{
+ return time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10));
+}
+
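A worked example of the deadline math, since fw_reset_max_dsecs counts 100 ms ticks:

	/* fw_reset_max_dsecs = 60 (6 seconds), HZ = 250:
	 * deadline = fw_reset_timestamp + 60 * 250 / 10
	 *          = fw_reset_timestamp + 1500 jiffies (6 s at 250 Hz)
	 */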
static void bnxt_fw_reset_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
@@ -11433,8 +11660,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_timestamp));
goto fw_reset_abort;
} else if (n > 0) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
@@ -11463,8 +11689,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
- !time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ !bnxt_fw_reset_timeout(bp)) {
bnxt_queue_fw_reset_work(bp, HZ / 5);
return;
}
@@ -11488,6 +11713,20 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
u32 val;
+ if (!bp->fw_reset_min_dsecs) {
+ u16 val;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID,
+ &val);
+ if (val == 0xffff) {
+ if (bnxt_fw_reset_timeout(bp)) {
+ netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ goto fw_reset_abort;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 1000);
+ return;
+ }
+ }
val = bnxt_fw_health_readl(bp,
BNXT_FW_RESET_INPROG_REG);
if (val)
@@ -11506,8 +11745,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
rc = __bnxt_hwrm_ver_get(bp, true);
if (rc) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
netdev_err(bp->dev, "Firmware reset aborted\n");
goto fw_reset_abort_status;
}
@@ -12088,8 +12326,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = bnxt_xdp,
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
@@ -12541,9 +12777,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
- if (BNXT_PF(bp))
- bnxt_vpd_read_info(bp);
-
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
goto init_err_pci_clean;
@@ -12555,6 +12788,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ if (BNXT_PF(bp))
+ bnxt_vpd_read_info(bp);
+
if (BNXT_CHIP_P5(bp)) {
bp->flags |= BNXT_FLAG_CHIP_P5;
if (BNXT_CHIP_SR2(bp))
@@ -12887,10 +13123,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12919,22 +13155,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err) {
- err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
- }
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- }
- }
-
- if (result != PCI_ERS_RESULT_RECOVERED) {
- if (netif_running(netdev))
- dev_close(netdev);
- pci_disable_device(pdev);
}
rtnl_unlock();
@@ -12952,10 +13174,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 950ea26ae0d2..1259e68cba2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,7 +18,7 @@
*/
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 1
+#define DRV_VER_UPD 2
#include <linux/ethtool.h>
#include <linux/interrupt.h>
@@ -656,6 +656,7 @@ struct nqe_cn {
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
+#define HWRM_CMD_MAX_TIMEOUT 40000
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -713,6 +714,13 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
+struct bnxt_mem_init {
+ u8 init_val;
+ u16 offset;
+#define BNXT_MEM_INVALID_OFFSET 0xffff
+ u16 size;
+};
+
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -722,7 +730,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- u8 init_val;
+ struct bnxt_mem_init *mem_init;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -1345,9 +1353,14 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
+#define BNXT_GRC_REG_STATUS_P5 0x520
+
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+#define BNXT_GRC_REG_CHIP_NUM 0x48
+#define BNXT_GRC_REG_BASE 0x260000
+
#define BNXT_GRC_BASE_MASK 0xfffff000
#define BNXT_GRC_OFFSET_MASK 0x00000ffc
@@ -1436,6 +1449,13 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_RINGS 8
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
+#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+
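A quick consistency check on the new constants:

	/* 1 slow-path + 8 fast-path = 9 rings, matching both the bare "9"
	 * loop bound and the tqm_mem[9] array size this patch replaces.
	 */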
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1461,7 +1481,6 @@ struct bnxt_ctx_mem_info {
u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
u8 tqm_fp_rings_count;
u32 flags;
@@ -1474,7 +1493,16 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
+
+#define BNXT_CTX_MEM_INIT_QP 0
+#define BNXT_CTX_MEM_INIT_SRQ 1
+#define BNXT_CTX_MEM_INIT_CQ 2
+#define BNXT_CTX_MEM_INIT_VNIC 3
+#define BNXT_CTX_MEM_INIT_STAT 4
+#define BNXT_CTX_MEM_INIT_MRAV 5
+#define BNXT_CTX_MEM_INIT_MAX 6
+ struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
};
struct bnxt_fw_health {
@@ -1503,6 +1531,8 @@ struct bnxt_fw_health {
u32 fw_reset_seq_regs[16];
u32 fw_reset_seq_vals[16];
u32 fw_reset_seq_delay_msec[16];
+ u32 echo_req_data1;
+ u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_reset_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
@@ -1527,9 +1557,22 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \
((reg) & BNXT_GRC_OFFSET_MASK))
+#define BNXT_FW_STATUS_HEALTH_MSK 0xffff
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
+#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_BOOTING(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) < \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_RETRY 5
+#define BNXT_FW_IF_RETRY 10
+
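The health word occupies the low 16 bits of the status register, with 0x8000 meaning fully booted; example classifications (register values assumed):

	/* sts = 0x00008000: BNXT_FW_IS_HEALTHY(sts) is true
	 * sts = 0x00000400: BNXT_FW_IS_BOOTING(sts), still initializing
	 * sts = 0x0000dead: BNXT_FW_IS_ERR(sts), fatal and unrecoverable
	 */

These macros also replace the open-coded comparison in bnxt_devlink.c below.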
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1783,6 +1826,7 @@ struct bnxt {
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
+#define BNXT_STATE_NAPI_DISABLED 9
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1898,6 +1942,7 @@ struct bnxt {
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
+#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
struct delayed_work fw_reset_task;
int fw_reset_state;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 6b7b69ed62db..64381be935a8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -44,21 +44,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- u32 val, health_status;
+ u32 val;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return 0;
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- health_status = val & 0xffff;
- if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ if (BNXT_FW_IS_BOOTING(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Not yet completed initialization");
if (rc)
return rc;
- } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
+ } else if (BNXT_FW_IS_ERR(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Encountered fatal error and cannot recover");
if (rc)
@@ -472,8 +471,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
u32 ver = nvm_cfg_ver.vu32;
- sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
- ver & 0xF);
+ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
+ ver & 0xf);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
buf);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9ff79d5d14c4..2f8b193a772d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2532,7 +2532,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
- install.flags |=
+ install.flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = _hwrm_send_message_silent(bp, &install,
@@ -2546,6 +2546,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
* UPDATE directory and try the flash again
*/
defrag_attempted = true;
+ install.flags = 0;
rc = __bnxt_flash_nvram(bp->dev,
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 2d3e962bdac3..6199f125bc13 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2020 Broadcom Inc.
+ * Copyright (c) 2018-2021 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -103,6 +103,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
#define HWRM_ERROR_RECOVERY_QCFG 0xcUL
#define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
@@ -164,6 +165,7 @@ struct cmd_nums {
#define HWRM_VNIC_PLCMODES_CFG 0x48UL
#define HWRM_VNIC_PLCMODES_QCFG 0x49UL
#define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
#define HWRM_RING_ALLOC 0x50UL
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
@@ -184,6 +186,9 @@ struct cmd_nums {
#define HWRM_QUEUE_MPLS_QCAPS 0x80UL
#define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
#define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -217,6 +222,8 @@ struct cmd_nums {
#define HWRM_PORT_TX_FIR_CFG 0xbbUL
#define HWRM_PORT_TX_FIR_QCFG 0xbcUL
#define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -347,6 +354,8 @@ struct cmd_nums {
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_FUNC_QSTATS_EXT 0x198UL
#define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -359,6 +368,11 @@ struct cmd_nums {
#define HWRM_MFG_HDMA_TEST 0x209UL
#define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
#define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
+ #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
+ #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -384,6 +398,7 @@ struct cmd_nums {
#define HWRM_TF_EXT_EM_QCFG 0x2e9UL
#define HWRM_TF_EM_INSERT 0x2eaUL
#define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
#define HWRM_TF_TCAM_SET 0x2f8UL
#define HWRM_TF_TCAM_GET 0x2f9UL
#define HWRM_TF_TCAM_MOVE 0x2faUL
@@ -486,9 +501,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 68
-#define HWRM_VERSION_STR "1.10.1.68"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 16
+#define HWRM_VERSION_STR "1.10.2.16"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -563,8 +578,9 @@ struct hwrm_ver_get_output {
__le16 max_resp_len;
__le16 def_req_timeout;
u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
u8 unused_0[2];
u8 always_1;
__le16 hwrm_intf_major;
@@ -708,6 +724,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x43UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -815,6 +833,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
__le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
u8 opaque_v;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
@@ -832,7 +852,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
@@ -1033,6 +1054,26 @@ struct hwrm_async_event_cmpl_deferred_response {
__le32 event_data1;
};
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1271,6 +1312,14 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1315,6 +1364,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
#define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1450,6 +1501,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1731,6 +1784,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1993,7 +2047,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
+/* hwrm_func_backing_store_qcaps_output (size:704b/88B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2024,13 +2078,49 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
- __le32 rsvd;
- __le16 rsvd1;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
u8 tqm_fp_rings_count;
- u8 valid;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 rsvd[5];
+ u8 valid;
+};
+
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
};
-/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+/* hwrm_func_backing_store_cfg_input (size:2432b/304B) */
struct hwrm_func_backing_store_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2041,22 +2131,25 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
u8 qpc_pg_size_qpc_lvl;
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
@@ -2358,6 +2451,63 @@ struct hwrm_func_backing_store_cfg_input {
__le16 tqm_entry_size;
__le16 mrav_entry_size;
__le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
};
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -2463,6 +2613,27 @@ struct hwrm_error_recovery_qcfg_output {
u8 valid;
};
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -2930,6 +3101,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le16 support_pam4_speeds;
@@ -3528,8 +3700,8 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
#define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xc0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 6
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -4119,7 +4291,10 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
- u8 unused_0;
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
char qid0_name[16];
char qid1_name[16];
char qid2_name[16];
@@ -4128,7 +4303,34 @@ struct hwrm_queue_qportcfg_output {
char qid5_name[16];
char qid6_name[16];
char qid7_name[16];
- u8 unused_1[7];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
u8 valid;
};
@@ -5142,8 +5344,10 @@ struct hwrm_vnic_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- u8 unused_0[4];
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ __le16 virtio_net_fid;
+ u8 unused_0[2];
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -5260,6 +5464,9 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
#define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
#define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5585,11 +5792,15 @@ struct hwrm_ring_alloc_output {
__le16 resp_len;
__le16 ring_id;
__le16 logical_ring_id;
- u8 unused_0[3];
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
u8 valid;
};
-/* hwrm_ring_free_input (size:192b/24B) */
+/* hwrm_ring_free_input (size:256b/32B) */
struct hwrm_ring_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5604,9 +5815,13 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
#define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 unused_0;
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
__le16 ring_id;
- u8 unused_1[4];
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
};
/* hwrm_ring_free_output (size:128b/16B) */
@@ -5644,7 +5859,11 @@ struct hwrm_ring_reset_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[4];
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
u8 consumer_idx[3];
u8 valid;
};
@@ -6988,21 +7207,23 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
u8 unused_0[3];
u8 valid;
};
@@ -7420,7 +7641,13 @@ struct hwrm_fw_qstatus_output {
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 unused_0[6];
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
u8 valid;
};
@@ -7472,7 +7699,8 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
__le16 len;
u8 version;
u8 count;
@@ -8000,6 +8228,9 @@ struct hwrm_dbg_coredump_initiate_output {
struct coredump_data_hdr {
__le32 address;
__le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
__le32 instance;
__le32 next_offset;
};
@@ -8669,7 +8900,6 @@ struct hcomm_status {
#define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
#define HCOMM_STATUS_TRUE_OFFSET_SFT 2
};
-
#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
#endif /* _BNXT_HSI_H_ */
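
The new echo-response message above exists so the driver can bounce the two event_data words from a firmware echo request straight back, letting firmware verify that the command channel is alive. A sketch of the reply path following the bnxt driver's usual HWRM conventions — treat the helper names and the HWRM_FUNC_ECHO_RESPONSE opcode (defined elsewhere in this header) as assumptions:

/* Sketch: answer a firmware echo request by returning its payload.
 * bnxt_hwrm_cmd_hdr_init()/hwrm_send_message() are the driver's
 * existing HWRM plumbing; their use here is an assumption.
 */
static void demo_hwrm_echo_response(struct bnxt *bp, u32 data1, u32 data2)
{
	struct hwrm_func_echo_response_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
	req.event_data1 = cpu_to_le32(data1);	/* echoed back verbatim */
	req.event_data2 = cpu_to_le32(data2);
	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}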
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 8c8368c2f335..64dbbb04b043 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -222,8 +222,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- return BNXT_MIN_ROCE_STAT_CTXS;
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ return BNXT_MIN_ROCE_STAT_CTXS;
+ }
return 0;
}
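
With this check in place, a RoCE ULP that has registered but never requested MSI-X vectors no longer pins BNXT_MIN_ROCE_STAT_CTXS. An illustrative (not driver) accounting helper showing how a caller folds the ULP demand into its own reservation:

/* Illustrative only: total stat contexts to reserve is the L2 demand
 * plus the ULP's share, which is now 0 unless the RoCE driver
 * actually requested MSI-X vectors.
 */
static int demo_total_stat_ctxs(struct bnxt *bp, int l2_stat_ctxs)
{
	return l2_stat_ctxs + bnxt_get_ulp_stat_ctxs(bp);
}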
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index fcc262064766..641303894341 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -133,12 +133,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
txr = rxr->bnapi->tx_ring;
- xdp.data_hard_start = *data_ptr - offset;
- xdp.data = *data_ptr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = *data_ptr + *len;
- xdp.rxq = &rxr->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
+ xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
rcu_read_lock();
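
xdp_init_buff() binds the buffer to its frame size and rxq once, and xdp_prepare_buff() then derives data_hard_start/data/data_end (marking metadata invalid when its last argument is false), replacing the five open-coded assignments removed above. A minimal standalone sketch of the same pattern — everything except the two net/xdp.h helpers is illustrative:

#include <net/xdp.h>

/* Sketch: build an xdp_buff over a one-page RX buffer whose
 * payload starts "offset" bytes into the page.
 */
static void demo_build_xdp_buff(struct xdp_buff *xdp,
				struct xdp_rxq_info *rxq,
				u8 *data_ptr, u32 offset, u32 len)
{
	xdp_init_buff(xdp, PAGE_SIZE, rxq);	/* frame size + rxq, once */
	/* hard_start = payload - headroom; no metadata (false) */
	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
}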
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index f6ca01da141d..0a6d91b0f0aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -16,6 +16,8 @@
#include <linux/dim.h>
#include <linux/ethtool.h>
+#include "../unimac.h"
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -150,63 +152,6 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
-#define UMAC_HD_BKP_CTRL 0x004
-#define HD_FC_EN (1 << 0)
-#define HD_FC_BKOFF_OK (1 << 1)
-#define IPG_CONFIG_RX_SHIFT 2
-#define IPG_CONFIG_RX_MASK 0x1F
-
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define UMAC_SPEED_10 0
-#define UMAC_SPEED_100 1
-#define UMAC_SPEED_1000 2
-#define UMAC_SPEED_2500 3
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00C
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_MODE 0x44
-#define MODE_LINK_STATUS (1 << 5)
-
-#define UMAC_EEE_CTRL 0x064
-#define EN_LPI_RX_PAUSE (1 << 0)
-#define EN_LPI_TX_PFC (1 << 1)
-#define EN_LPI_TX_PAUSE (1 << 2)
-#define EEE_EN (1 << 3)
-#define RX_FIFO_CHECK (1 << 4)
-#define EEE_TX_CLK_DIS (1 << 5)
-#define DIS_EEE_10M (1 << 6)
-#define LP_IDLE_PREDICTION_MODE (1 << 7)
-
-#define UMAC_EEE_LPI_TIMER 0x068
-#define UMAC_EEE_WAKE_TIMER 0x06C
-#define UMAC_EEE_REF_COUNT 0x070
-#define EEE_REFERENCE_COUNT_MASK 0xffff
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6fb6c3556285..5335244e4577 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -63,11 +63,11 @@ void bcmgenet_mii_setup(struct net_device *dev)
/* speed */
if (phydev->speed == SPEED_1000)
- cmd_bits = UMAC_SPEED_1000;
+ cmd_bits = CMD_SPEED_1000;
else if (phydev->speed == SPEED_100)
- cmd_bits = UMAC_SPEED_100;
+ cmd_bits = CMD_SPEED_100;
else
- cmd_bits = UMAC_SPEED_10;
+ cmd_bits = CMD_SPEED_10;
cmd_bits <<= CMD_SPEED_SHIFT;
/* duplex */
@@ -359,7 +359,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* those versions of GENET.
*/
if (priv->internal_phy && !GENET_IS_V5(priv))
- dev->phydev->irq = PHY_IGNORE_INTERRUPT;
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
return 0;
}
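
PHY_MAC_INTERRUPT is the explicit replacement for PHY_IGNORE_INTERRUPT: it tells phylib that the MAC (here, GENET's internal logic) delivers the PHY's link interrupts, and the MAC driver reports link changes itself via phy_mac_interrupt(). A minimal sketch of that reporting path; the ISR and its status handling are illustrative:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Sketch: a MAC-level ISR forwarding a link event to phylib. */
static irqreturn_t demo_mac_link_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;

	/* ... read and ack the MAC's link-status interrupt here ... */
	phy_mac_interrupt(ndev->phydev);	/* kicks the phylib state machine */

	return IRQ_HANDLED;
}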
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5143cdd0eeca..d2381929931b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1580,12 +1580,6 @@ static int tg3_mdio_init(struct tg3 *tp)
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
- if (tg3_flag(tp, RGMII_INBAND_DISABLE))
- phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
- if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
- if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
fallthrough;
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
@@ -12826,11 +12820,13 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
offset = tg3_nvram_logical_addr(tp, offset);
}
- }
- if (!offset || !len) {
- offset = TG3_NVM_VPD_OFF;
- len = TG3_NVM_VPD_LEN;
+ if (!offset || !len) {
+ offset = TG3_NVM_VPD_OFF;
+ len = TG3_NVM_VPD_LEN;
+ }
+ } else {
+ len = TG3_NVM_PCI_VPD_MAX_LEN;
}
buf = kmalloc(len, GFP_KERNEL);
@@ -12846,26 +12842,16 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
goto error;
}
+ *vpdlen = len;
} else {
- u8 *ptr;
ssize_t cnt;
- unsigned int pos = 0;
-
- ptr = (u8 *)&buf[0];
- for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
- cnt = pci_read_vpd(tp->pdev, pos,
- len - pos, ptr);
- if (cnt == -ETIMEDOUT || cnt == -EINTR)
- cnt = 0;
- else if (cnt < 0)
- goto error;
- }
- if (pos != len)
+
+ cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
+ if (cnt < 0)
goto error;
+ *vpdlen = cnt;
}
- *vpdlen = len;
-
return buf;
error:
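
pci_read_vpd() returns the number of bytes actually read (or a negative errno) and handles retries internally, which is what lets the hunk above drop the chunked read loop and report the true VPD length via *vpdlen. A standalone sketch of the simplified pattern; the helper and its buffer sizing are illustrative:

#include <linux/pci.h>
#include <linux/slab.h>

/* Sketch: read up to max_len bytes of a device's VPD in one call. */
static void *demo_read_vpd(struct pci_dev *pdev, size_t max_len,
			   size_t *actual_len)
{
	void *buf = kmalloc(max_len, GFP_KERNEL);
	ssize_t cnt;

	if (!buf)
		return NULL;

	cnt = pci_read_vpd(pdev, 0, max_len, buf);	/* bytes read or -errno */
	if (cnt < 0) {
		kfree(buf);
		return NULL;
	}
	*actual_len = cnt;
	return buf;
}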
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1000c894064f..46ec4fdfd16a 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2101,6 +2101,7 @@
/* Hardware Legacy NVRAM layout */
#define TG3_NVM_VPD_OFF 0x100
#define TG3_NVM_VPD_LEN 256
+#define TG3_NVM_PCI_VPD_MAX_LEN 512
/* Hardware Selfboot NVRAM layout */
#define TG3_NVM_HWSB_CFG1 0x00000004
diff --git a/drivers/net/ethernet/broadcom/unimac.h b/drivers/net/ethernet/broadcom/unimac.h
new file mode 100644
index 000000000000..585a85286257
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/unimac.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __UNIMAC_H
+#define __UNIMAC_H
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET_OLD (1 << 11)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_RX_ERR_DISC (1 << 26)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_PAUSE_QUANTA 0x018
+#define UMAC_MODE 0x044
+#define MODE_LINK_STATUS (1 << 5)
+#define UMAC_FRM_TAG0 0x048 /* outer tag */
+#define UMAC_FRM_TAG1 0x04c /* inner tag */
+#define UMAC_TX_IPG_LEN 0x05c
+#define UMAC_EEE_CTRL 0x064
+#define EN_LPI_RX_PAUSE (1 << 0)
+#define EN_LPI_TX_PFC (1 << 1)
+#define EN_LPI_TX_PAUSE (1 << 2)
+#define EEE_EN (1 << 3)
+#define RX_FIFO_CHECK (1 << 4)
+#define EEE_TX_CLK_DIS (1 << 5)
+#define DIS_EEE_10M (1 << 6)
+#define LP_IDLE_PREDICTION_MODE (1 << 7)
+#define UMAC_EEE_LPI_TIMER 0x068
+#define UMAC_EEE_WAKE_TIMER 0x06C
+#define UMAC_EEE_REF_COUNT 0x070
+#define EEE_REFERENCE_COUNT_MASK 0xffff
+#define UMAC_RX_IPG_INV 0x078
+#define UMAC_MACSEC_PROG_TX_CRC 0x310
+#define UMAC_MACSEC_CTRL 0x314
+#define UMAC_PAUSE_CTRL 0x330
+#define UMAC_TX_FLUSH 0x334
+#define UMAC_RX_FIFO_STATUS 0x338
+#define UMAC_TX_FIFO_STATUS 0x33c
+
+#endif
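
The shared CMD_SPEED_* selectors occupy bits [3:2] of UMAC_CMD (per CMD_SPEED_SHIFT and CMD_SPEED_MASK), so a driver programs the port speed with a read-modify-write of that field. A minimal sketch using the generic MMIO accessors; the base-address handling is illustrative:

#include <linux/io.h>

/* Sketch: program the UniMAC speed field, e.g. with CMD_SPEED_1000. */
static void demo_umac_set_speed(void __iomem *umac_base, u32 speed_sel)
{
	u32 cmd = readl(umac_base + UMAC_CMD);

	cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);	/* clear bits [3:2] */
	cmd |= speed_sel << CMD_SPEED_SHIFT;
	writel(cmd, umac_base + UMAC_CMD);
}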